// Fragment of GroupByQueryConfig.withOverrides(query): builds a per-query copy of this
// config, letting query-context entries override the server defaults. NOTE(review):
// this fragment is truncated mid-statement at the end (maxMergingDictionarySize).
final GroupByQueryConfig newConfig = new GroupByQueryConfig();
// Strategy and threading flags come straight from the context (no clamping needed).
newConfig.defaultStrategy = query.getContextValue(CTX_KEY_STRATEGY, getDefaultStrategy());
newConfig.singleThreaded = query.getContextBoolean(CTX_KEY_IS_SINGLE_THREADED, isSingleThreaded());
// Size/limit settings are clamped with Math.min so a query can lower, but never
// raise, the server-configured ceilings.
newConfig.maxIntermediateRows = Math.min(
    query.getContextValue(CTX_KEY_MAX_INTERMEDIATE_ROWS, getMaxIntermediateRows()),
    getMaxIntermediateRows()
);
newConfig.maxResults = Math.min(
    query.getContextValue(CTX_KEY_MAX_RESULTS, getMaxResults()),
    getMaxResults()
);
newConfig.bufferGrouperMaxSize = Math.min(
    query.getContextValue(CTX_KEY_BUFFER_GROUPER_MAX_SIZE, getBufferGrouperMaxSize()),
    getBufferGrouperMaxSize()
);
// Load factor and initial buckets are tuning knobs, not resource caps, so they are
// taken as-is from the context rather than clamped.
newConfig.bufferGrouperMaxLoadFactor = query.getContextValue(
    CTX_KEY_BUFFER_GROUPER_MAX_LOAD_FACTOR,
    getBufferGrouperMaxLoadFactor()
);
newConfig.bufferGrouperInitialBuckets = query.getContextValue(
    CTX_KEY_BUFFER_GROUPER_INITIAL_BUCKETS,
    getBufferGrouperInitialBuckets()
);
// The cast through Number handles context values deserialized as Integer when the
// field is a long — presumably JSON context entries may arrive as Integer; confirm.
newConfig.maxOnDiskStorage = Math.min(
    ((Number) query.getContextValue(CTX_KEY_MAX_ON_DISK_STORAGE, getMaxOnDiskStorage())).longValue(),
    getMaxOnDiskStorage()
);
newConfig.maxMergingDictionarySize = Math.min(
    ((Number) query.getContextValue(CTX_KEY_MAX_MERGING_DICTIONARY_SIZE, getMaxMergingDictionarySize())).longValue(),
/**
 * Decorates the given runner before merging: first chunks the query by the configured
 * chunk period, then wraps the result so subqueries are resolved around it.
 */
@Override
public QueryRunner<Row> preMergeQueryDecoration(QueryRunner<Row> runner)
{
  final QueryRunner<Row> chunkedRunner =
      new IntervalChunkingQueryRunner<Row>(runner, configSupplier.get().getChunkPeriod());
  return new SubqueryQueryRunner<Row>(chunkedRunner);
}
}
/**
 * Picks the groupBy strategy implementation (v1 or v2) for the given query, honoring
 * any per-query context override of the configured default.
 *
 * @throws ISE if the resolved strategy name is neither v1 nor v2
 */
public GroupByStrategy strategize(GroupByQuery query)
{
  final String strategyString = config.withOverrides(query).getDefaultStrategy();
  if (strategyString.equals(STRATEGY_V2)) {
    return strategyV2;
  } else if (strategyString.equals(STRATEGY_V1)) {
    return strategyV1;
  } else {
    throw new ISE("No such strategy[%s]", strategyString);
  }
}
}
/**
 * Builds the hash-based buffer grouper for this query, backed by the pre-acquired
 * buffer and sized/tuned from the per-query config.
 */
@Override
protected Grouper<ByteBuffer> newGrouper()
{
  // BufferHashGrouper takes an array of aggregators; materialize the spec list once.
  final AggregatorFactory[] aggregators = query.getAggregatorSpecs()
      .toArray(new AggregatorFactory[query.getAggregatorSpecs().size()]);
  return new BufferHashGrouper<>(
      Suppliers.ofInstance(buffer),
      keySerde,
      cursor.getColumnSelectorFactory(),
      aggregators,
      querySpecificConfig.getBufferGrouperMaxSize(),
      querySpecificConfig.getBufferGrouperMaxLoadFactor(),
      querySpecificConfig.getBufferGrouperInitialBuckets(),
      true
  );
}
// NOTE(review): trailing argument list of a grouper construction — the call's opening
// (receiver and earlier arguments) is outside this view.
columnSelectorFactory,
aggregatorFactories,
// Hash-table tuning pulled from the per-query config.
groupByQueryConfig.getBufferGrouperMaxSize(),
groupByQueryConfig.getBufferGrouperMaxLoadFactor(),
groupByQueryConfig.getBufferGrouperInitialBuckets(),
// Spill-to-disk support: storage target plus the mapper used to serialize rows.
temporaryStorage,
spillMapper,
// Timeout propagation — presumably queryTimeoutAt is an absolute deadline in millis;
// TODO confirm against the caller.
hasQueryTimeout,
queryTimeoutAt,
// Parallel-combine settings for the concurrent grouper.
groupByQueryConfig.getIntermediateCombineDegree(),
groupByQueryConfig.getNumParallelCombineThreads()
);
// NOTE(review): this span appears to be two separate argument lists spliced together
// (extraction artifact) — it is not one contiguous statement. Tokens preserved as-is.
final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
// A null universal timestamp means rows carry their own timestamps, so the grouper
// must include the timestamp in its key.
final boolean includeTimestamp = GroupByStrategyV2.getUniversalTimestamp(query) == null;
query.getContextSortByDimsFirst(),
query.getDimensions(),
// Dictionary budget is split across concurrent hash tables when a concurrency hint
// is given (-1 presumably means "no hint"; TODO confirm).
querySpecificConfig.getMaxMergingDictionarySize() / (concurrencyHint == -1 ? 1 : concurrencyHint),
valueTypes,
aggregatorFactories,
columnSelectorFactory,
aggregatorFactories,
querySpecificConfig.getBufferGrouperMaxSize(),
querySpecificConfig.getBufferGrouperMaxLoadFactor(),
querySpecificConfig.getBufferGrouperInitialBuckets(),
temporaryStorage,
spillMapper,
query.getContextSortByDimsFirst(),
query.getDimensions(),
querySpecificConfig.getMaxMergingDictionarySize(), // use entire dictionary space for combining key serde
valueTypes,
aggregatorFactories,
// NOTE(review): opening of a method body whose signature and remainder are outside
// this view; the createIndexAccumulatorPair call is truncated mid-argument-list.
// Apply any query-context overrides before reading config values.
final GroupByQueryConfig querySpecificConfig = configSupplier.get().withOverrides(query);
final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
// Builds the incremental index plus the accumulator that folds result rows into it.
final Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> indexAccumulatorPair =
    GroupByQueryHelper.createIndexAccumulatorPair(
        query,
/**
 * Assembles a GroupByQueryRunnerFactory for tests/benchmarks, with a lowered
 * intermediate-row cap and shared noop watcher/decorator utilities.
 */
private static GroupByQueryRunnerFactory getGroupByQueryRunnerFactory()
{
  final GroupByQueryConfig config = new GroupByQueryConfig();
  // Keep the in-memory footprint small for this harness.
  config.setMaxIntermediateRows(10000);
  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
  final ObjectMapper mapper = new DefaultObjectMapper();
  final GroupByQueryEngine engine = new GroupByQueryEngine(configSupplier, Utils.getBufferPool());
  final GroupByQueryQueryToolChest toolChest = new GroupByQueryQueryToolChest(
      configSupplier,
      mapper,
      engine,
      Utils.getBufferPool(),
      Utils.NoopIntervalChunkingQueryRunnerDecorator()
  );
  return new GroupByQueryRunnerFactory(
      engine,
      Utils.NOOP_QUERYWATCHER,
      configSupplier,
      toolChest,
      Utils.getBufferPool()
  );
}
// NOTE(review): constructor fragment — the opening brace and the rest of the body
// are outside this view; tokens preserved as-is.
public RowIterator(GroupByQuery query, final Cursor cursor, ByteBuffer metricsBuffer, GroupByQueryConfig config)
// Honor per-query context overrides when reading the intermediate-row cap.
final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
this.maxIntermediateRows = querySpecificConfig.getMaxIntermediateRows();
// NOTE(review): fragment — the builder expressions below are missing their opening
// (builder creation) and parts of the surrounding if/else; tokens preserved as-is.
final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
final Granularity gran = query.getGranularity();
// Presumably used as the base timestamp for the index; confirm against the caller.
final DateTime timeStart = query.getIntervals().get(0).getStart();
// Off-heap variant: rows capped by maxResults, storage drawn from the buffer pool.
.setConcurrentEventAdd(true)
.setSortFacts(sortResults)
.setMaxRowCount(querySpecificConfig.getMaxResults())
.buildOffheap(bufferPool);
} else {
// On-heap variant with the same row cap.
.setConcurrentEventAdd(true)
.setSortFacts(sortResults)
.setMaxRowCount(querySpecificConfig.getMaxResults())
.buildOnheap();
/**
 * Folds one result row into the accumulating index, enforcing the configured
 * maxResults ceiling on the index's row count.
 *
 * @throws ISE once the index grows past config.getMaxResults()
 */
@Override
public IncrementalIndex accumulate(IncrementalIndex accumulated, Row in)
{
  // add() returns the index's row count after insertion; stay within the cap.
  if (accumulated.add(Rows.toCaseInsensitiveInputRow(in, dimensions)) <= config.getMaxResults()) {
    return accumulated;
  }
  throw new ISE("Computation exceeds maxRows limit[%s]", config.getMaxResults());
}
};
// NOTE(review): fragment — the enclosing loop and the break's closing brace are
// outside this view; tokens preserved as-is.
// Stop this pass when keys were left unprocessed (buffer full) or the row cap is hit.
if (unprocessedKeys != null || rowUpdater.getNumRows() > config.getMaxIntermediateRows()) {
break;
// NOTE(review): fragment — the method's opening brace is missing and the body is
// truncated mid-call; tokens preserved as-is.
@Override
public QueryRunner<Row> mergeRunners(final ExecutorService queryExecutor, Iterable<QueryRunner<Row>> queryRunners)
// Single-threaded mode runs the per-segment runners sequentially via concatenation
// instead of fanning out on the executor.
if (config.get().isSingleThreaded()) {
return new ConcatQueryRunner<Row>(
Sequences.map(
// NOTE(review): two-statement fragment; the enclosing method is outside this view.
// Apply per-query context overrides, then read the effective threading flag.
final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();