/**
 * SpillManager takes care of spilling and loading tuples from spilled data structs.
 *
 * @param numSpillFiles number of spill partitions to create; each backing file
 *            handles up to 2GB of data
 * @param serverAggregators aggregators used to estimate the serialized size of a
 *            single spilled element
 */
public SpillManager(int numSpillFiles, ServerAggregators serverAggregators,
        Configuration conf, SpillableGroupByCache.QueryCache cache) {
    try {
        int estValueSize = serverAggregators.getEstimatedByteSize();
        spillMaps = Lists.newArrayList();
        this.numSpillFiles = numSpillFiles;
        this.aggregators = serverAggregators;
        this.conf = conf;
        File spillFilesDir = conf.get(QueryServices.SPOOL_DIRECTORY) != null
                ? new File(conf.get(QueryServices.SPOOL_DIRECTORY)) : null;
        // Ensure that a single element fits onto a page!!!
        Preconditions.checkArgument(SpillFile.DEFAULT_PAGE_SIZE > estValueSize);
        // Create a list of spillFiles
        // Each Spillfile only handles up to 2GB data
        for (int i = 0; i < numSpillFiles; i++) {
            SpillFile file = SpillFile.createSpillFile(spillFilesDir);
            spillMaps.add(new SpillMap(file, SpillFile.DEFAULT_PAGE_SIZE, estValueSize, cache));
        }
    } catch (IOException ioe) {
        // Chain the cause so the original I/O failure is not lost.
        throw new RuntimeException("Could not init the SpillManager", ioe);
    }
}
/**
 * Builds an in-memory GROUP BY cache, pre-sizing the aggregate map for the
 * estimated number of distinct values and reserving a matching amount of
 * memory from the tenant's memory manager.
 */
InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId,
        byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
    this.env = env;
    this.estDistVals = estDistVals;
    this.aggregators = aggregators;
    this.customAnnotations = customAnnotations;
    this.aggregateMap = Maps.newHashMapWithExpectedSize(estDistVals);
    // Reserve memory up front based on the per-value byte estimate.
    int perValueBytes = aggregators.getEstimatedByteSize();
    long reservation = sizeOfUnorderedGroupByMap(estDistVals, perValueBytes);
    TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
    this.chunk = tenantCache.getMemoryManager().allocate(reservation);
}
// NOTE(review): fragment of a constructor — signature and remainder are outside
// this view. Comments below describe only what these statements visibly do.
this.env = env;
// Estimated serialized byte size of one aggregated value.
final int estValueSize = aggregators.getEstimatedByteSize();
// Tenant-scoped cache, presumably used to reserve memory below — TODO confirm.
final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Re-estimate the map's footprint from the ACTUAL number of distinct keys
    // accumulated, then adjust the reserved memory chunk to match.
    // NOTE(review): method body continues beyond this view.
    long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);
@Override public Aggregator[] cache(ImmutableBytesPtr cacheKey) { ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); Aggregator[] rowAggregators = aggregateMap.get(key); if (rowAggregators == null) { // If Aggregators not found for this distinct // value, clone our original one (we need one // per distinct value) if (logger.isDebugEnabled()) { logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key " + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()), customAnnotations)); } rowAggregators = aggregators.newAggregators(env.getConfiguration()); aggregateMap.put(key, rowAggregators); if (aggregateMap.size() > estDistVals) { // increase allocation estDistVals *= 1.5f; long estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getEstimatedByteSize()); chunk.resize(estSize); } } return rowAggregators; }
/**
 * SpillManager takes care of spilling and loading tuples from spilled data structs.
 *
 * @param numSpillFiles number of spill partitions to create; each backing file
 *            handles up to 2GB of data
 * @param serverAggregators aggregators used to estimate the serialized size of a
 *            single spilled element
 */
public SpillManager(int numSpillFiles, ServerAggregators serverAggregators,
        Configuration conf, SpillableGroupByCache.QueryCache cache) {
    try {
        int estValueSize = serverAggregators.getEstimatedByteSize();
        spillMaps = Lists.newArrayList();
        this.numSpillFiles = numSpillFiles;
        this.aggregators = serverAggregators;
        this.conf = conf;
        File spillFilesDir = conf.get(QueryServices.SPOOL_DIRECTORY) != null
                ? new File(conf.get(QueryServices.SPOOL_DIRECTORY)) : null;
        // Ensure that a single element fits onto a page!!!
        Preconditions.checkArgument(SpillFile.DEFAULT_PAGE_SIZE > estValueSize);
        // Create a list of spillFiles
        // Each Spillfile only handles up to 2GB data
        for (int i = 0; i < numSpillFiles; i++) {
            SpillFile file = SpillFile.createSpillFile(spillFilesDir);
            spillMaps.add(new SpillMap(file, SpillFile.DEFAULT_PAGE_SIZE, estValueSize, cache));
        }
    } catch (IOException ioe) {
        // Chain the cause so the original I/O failure is not lost.
        throw new RuntimeException("Could not init the SpillManager", ioe);
    }
}
/**
 * SpillManager takes care of spilling and loading tuples from spilled data structs.
 *
 * @param numSpillFiles number of spill partitions to create; each backing file
 *            handles up to 2GB of data
 * @param serverAggregators aggregators used to estimate the serialized size of a
 *            single spilled element
 */
public SpillManager(int numSpillFiles, ServerAggregators serverAggregators,
        Configuration conf, SpillableGroupByCache.QueryCache cache) {
    try {
        int estValueSize = serverAggregators.getEstimatedByteSize();
        spillMaps = Lists.newArrayList();
        this.numSpillFiles = numSpillFiles;
        this.aggregators = serverAggregators;
        this.conf = conf;
        File spillFilesDir = conf.get(QueryServices.SPOOL_DIRECTORY) != null
                ? new File(conf.get(QueryServices.SPOOL_DIRECTORY)) : null;
        // Ensure that a single element fits onto a page!!!
        Preconditions.checkArgument(SpillFile.DEFAULT_PAGE_SIZE > estValueSize);
        // Create a list of spillFiles
        // Each Spillfile only handles up to 2GB data
        for (int i = 0; i < numSpillFiles; i++) {
            SpillFile file = SpillFile.createSpillFile(spillFilesDir);
            spillMaps.add(new SpillMap(file, SpillFile.DEFAULT_PAGE_SIZE, estValueSize, cache));
        }
    } catch (IOException ioe) {
        // Chain the cause so the original I/O failure is not lost.
        throw new RuntimeException("Could not init the SpillManager", ioe);
    }
}
/**
 * Builds an in-memory GROUP BY cache, pre-sizing the aggregate map for the
 * estimated number of distinct values and reserving a matching amount of
 * memory from the tenant's memory manager.
 */
InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId,
        byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
    this.env = env;
    this.estDistVals = estDistVals;
    this.aggregators = aggregators;
    this.customAnnotations = customAnnotations;
    this.aggregateMap = Maps.newHashMapWithExpectedSize(estDistVals);
    // Reserve memory up front based on the per-value byte estimate.
    int perValueBytes = aggregators.getEstimatedByteSize();
    long reservation = sizeOfUnorderedGroupByMap(estDistVals, perValueBytes);
    TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
    this.chunk = tenantCache.getMemoryManager().allocate(reservation);
}
/**
 * Builds an in-memory GROUP BY cache, pre-sizing the aggregate map for the
 * estimated number of distinct values and reserving a matching amount of
 * memory from the tenant's memory manager.
 */
InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId,
        byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
    this.env = env;
    this.estDistVals = estDistVals;
    this.aggregators = aggregators;
    this.customAnnotations = customAnnotations;
    this.aggregateMap = Maps.newHashMapWithExpectedSize(estDistVals);
    // Reserve memory up front based on the per-value byte estimate.
    int perValueBytes = aggregators.getEstimatedByteSize();
    long reservation = sizeOfUnorderedGroupByMap(estDistVals, perValueBytes);
    TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
    this.chunk = tenantCache.getMemoryManager().allocate(reservation);
}
// NOTE(review): fragment of a constructor — signature and remainder are outside
// this view. Comments below describe only what these statements visibly do.
this.env = env;
// Estimated serialized byte size of one aggregated value.
final int estValueSize = aggregators.getEstimatedByteSize();
// Tenant-scoped cache, presumably used to reserve memory below — TODO confirm.
final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
// NOTE(review): fragment of a constructor — signature and remainder are outside
// this view. Comments below describe only what these statements visibly do.
this.env = env;
// Estimated serialized byte size of one aggregated value.
final int estValueSize = aggregators.getEstimatedByteSize();
// Tenant-scoped cache, presumably used to reserve memory below — TODO confirm.
final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Re-estimate the map's footprint from the ACTUAL number of distinct keys
    // accumulated, then adjust the reserved memory chunk to match.
    // NOTE(review): method body continues beyond this view.
    long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Re-estimate the map's footprint from the ACTUAL number of distinct keys
    // accumulated, then adjust the reserved memory chunk to match.
    // NOTE(review): method body continues beyond this view.
    long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);
@Override public Aggregator[] cache(ImmutableBytesPtr cacheKey) { ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); Aggregator[] rowAggregators = aggregateMap.get(key); if (rowAggregators == null) { // If Aggregators not found for this distinct // value, clone our original one (we need one // per distinct value) if (logger.isDebugEnabled()) { logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key " + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()), customAnnotations)); } rowAggregators = aggregators.newAggregators(env.getConfiguration()); aggregateMap.put(key, rowAggregators); if (aggregateMap.size() > estDistVals) { // increase allocation estDistVals *= 1.5f; long estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getEstimatedByteSize()); chunk.resize(estSize); } } return rowAggregators; }
@Override public Aggregator[] cache(ImmutableBytesPtr cacheKey) { ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); Aggregator[] rowAggregators = aggregateMap.get(key); if (rowAggregators == null) { // If Aggregators not found for this distinct // value, clone our original one (we need one // per distinct value) if (logger.isDebugEnabled()) { logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key " + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()), customAnnotations)); } rowAggregators = aggregators.newAggregators(env.getConfiguration()); aggregateMap.put(key, rowAggregators); if (aggregateMap.size() > estDistVals) { // increase allocation estDistVals *= 1.5f; long estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getEstimatedByteSize()); chunk.resize(estSize); } } return rowAggregators; }