// NOTE(review): garbled/truncated snippet — a GroupByQuery rebuild that overrides context with
// finalize=false, CTX_KEY_OUTERMOST=false and the limit-push-down flag appears fused with an
// unrelated branch that post-processes the row sequence only for the outermost query.
// The original file is needed to restore the real structure; do not edit in this form.
final GroupByQuery newQuery = new GroupByQuery( query.getDataSource(), query.getQuerySegmentSpec(), query.getVirtualColumns(), query.getDimFilter(), query.getGranularity(), query.getDimensions(), query.getAggregatorSpecs(), query.getPostAggregatorSpecs(), query.getLimitSpec(), query.getContext() ).withOverriddenContext( ImmutableMap.<String, Object>of( "finalize", false, CTX_KEY_OUTERMOST, false, GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, query.isApplyLimitPushDown() if (query.getContextBoolean(CTX_KEY_OUTERMOST, true)) { return query.postProcess(rowSequence); } else { return rowSequence;
/**
 * Builds a metric event describing this groupBy query: datasource, dimension/aggregator
 * counts, the intervals queried, whether filters are present, and the total queried duration
 * in minutes.
 */
@Override
public ServiceMetricEvent.Builder makeMetricBuilder(GroupByQuery query)
{
  // Total minutes covered by all intervals, used as the duration dimension.
  int totalMinutes = 0;
  for (Interval queryInterval : query.getIntervals()) {
    totalMinutes += Minutes.minutesIn(queryInterval).getMinutes();
  }

  final String intervalsCsv = Joiner.on(",").join(query.getIntervals());

  return new ServiceMetricEvent.Builder()
      .setUser2(query.getDataSource().toString())
      .setUser3(String.format("%,d dims", query.getDimensions().size()))
      .setUser4("groupBy")
      .setUser5(intervalsCsv)
      .setUser6(String.valueOf(query.hasFilters()))
      .setUser7(String.format("%,d aggs", query.getAggregatorSpecs().size()))
      .setUser9(Minutes.minutes(totalMinutes).toString());
}
/**
 * Copy constructor: seeds the builder with every component of an existing query so a
 * modified variant can be built without mutating the original.
 */
public Builder(GroupByQuery query)
{
  this.dataSource = query.getDataSource();
  this.querySegmentSpec = query.getQuerySegmentSpec();
  this.virtualColumns = query.getVirtualColumns();
  this.dimFilter = query.getDimFilter();
  this.granularity = query.getGranularity();
  this.dimensions = query.getDimensions();
  this.aggregatorSpecs = query.getAggregatorSpecs();
  this.postAggregatorSpecs = query.getPostAggregatorSpecs();
  this.havingSpec = query.getHavingSpec();
  this.limitSpec = query.getLimitSpec();
  // postProcessingFn has no public getter; copied via direct field access.
  this.postProcessingFn = query.postProcessingFn;
  this.context = query.getContext();
}
/**
 * Returns a copy of this query with the given segment spec substituted; every other
 * component is carried over unchanged.
 */
@Override
public GroupByQuery withQuerySegmentSpec(QuerySegmentSpec spec)
{
  final GroupByQuery rebuilt = new GroupByQuery(
      getDataSource(),
      spec,
      dimFilter,
      granularity,
      dimensions,
      aggregatorSpecs,
      postAggregatorSpecs,
      havingSpec,
      limitSpec,
      orderByLimitFn,
      getContext()
  );
  return rebuilt;
}
/**
 * Returns a copy of this query whose context is the existing context merged with the given
 * overrides (overrides win); every other component is carried over unchanged.
 */
@Override
public GroupByQuery withOverriddenContext(Map<String, Object> contextOverride)
{
  final Map<String, Object> mergedContext = computeOverridenContext(contextOverride);
  return new GroupByQuery(
      getDataSource(),
      getQuerySegmentSpec(),
      dimFilter,
      granularity,
      dimensions,
      aggregatorSpecs,
      postAggregatorSpecs,
      havingSpec,
      limitSpec,
      orderByLimitFn,
      mergedContext
  );
}
// NOTE(review): truncated snippet from a groupBy engine — captures the query granularity and the
// start millis of the first interval, then (in code elided here) maps over aggregator specs and
// dimension specs with anonymous Functions. Not a compilable unit as shown.
final QueryGranularity gran = query.getGranularity(); final long timeStart = query.getIntervals().get(0).getStartMillis(); query.getAggregatorSpecs(), new Function<AggregatorFactory, AggregatorFactory>() query.getDimensions(), new Function<DimensionSpec, String>()
// NOTE(review): truncated snippet — enforces the single-interval invariant (per-segment groupBy
// processing expects exactly one interval), then builds cursor arguments (filter, interval,
// virtual columns, granularity) and reads the fudge-timestamp context value. Interleaved pieces
// of several statements; not a compilable unit as shown.
final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals(); if (intervals.size() != 1) { throw new IAE("Should only have one interval, got[%s]", intervals); Filters.toFilter(query.getDimFilter()), intervals.get(0), query.getVirtualColumns(), query.getGranularity(), false, null .getDimensions() .stream() .allMatch(dimension -> { query.getContextValue(GroupByStrategyV2.CTX_KEY_FUDGE_TIMESTAMP, "") );
// NOTE(review): truncated snippet from nested-groupBy (subquery) handling — collects the
// subquery's output dimension names, checks outer aggregators' required columns against them,
// rewrites inner/outer queries (interval, post-aggs, merged limit spec, sorted-results context),
// and concatenates per-interval result sequences. Not a compilable unit as shown.
for (DimensionSpec dimension : subquery.getDimensions()) { dimensionNames.add(dimension.getOutputName()); for (AggregatorFactory aggregatorFactory : query.getAggregatorSpecs()) { for (final AggregatorFactory transferAgg : aggregatorFactory.getRequiredColumns()) { if (dimensionNames.contains(transferAgg.getName())) { .setInterval(subquery.getIntervals()) .setPostAggregatorSpecs(Lists.<PostAggregator>newArrayList()) .build(); .setLimitSpec(query.getLimitSpec().merge(subquery.getLimitSpec())) .build(); innerQuery.withOverriddenContext( ImmutableMap.<String, Object>of( GroupByQueryHelper.CTX_KEY_SORT_RESULTS, true Sequences.concat( Sequences.map( Sequences.simple(outerQuery.getIntervals()), new Function<Interval, Sequence<Row>>() outerQuery.postProcess(GroupByQueryHelper.postAggregate(query, outerQueryResultIndex)), outerQueryResultIndex );
/**
 * Computes the result-cache key for a groupBy query from the components that determine its
 * per-segment results: granularity, filter, aggregators, dimensions, and virtual columns.
 * Append order is significant — it is part of the key's byte layout.
 *
 * NOTE(review): limitSpec, havingSpec, and post-aggregators are not folded into the key —
 * presumably because they are applied at merge time rather than per segment; confirm before
 * relying on this for caching changes.
 */
@Override
public byte[] computeCacheKey(GroupByQuery query)
{
  final CacheKeyBuilder keyBuilder = new CacheKeyBuilder(GROUPBY_QUERY)
      .appendByte(CACHE_STRATEGY_VERSION)
      .appendCacheable(query.getGranularity())
      .appendCacheable(query.getDimFilter())
      .appendCacheables(query.getAggregatorSpecs())
      .appendCacheables(query.getDimensions())
      .appendCacheable(query.getVirtualColumns());
  return keyBuilder.build();
}
// Aggregator and dimension specs captured from the query once at construction so later
// per-row work does not repeatedly call the getters.
// NOTE(review): fields of an enclosing class not fully visible in this chunk; `query` is
// presumably a constructor parameter or enclosing-scope capture — confirm in the full file.
private final List<AggregatorFactory> aggs = query.getAggregatorSpecs(); private final List<DimensionSpec> dims = query.getDimensions();
/**
 * Renders the query's main components for logging and debugging. Output is identical to the
 * previous concatenation-based implementation.
 */
@Override
public String toString()
{
  final StringBuilder sb = new StringBuilder("GroupByQuery{");
  sb.append("dataSource='").append(getDataSource()).append('\'');
  sb.append(", querySegmentSpec=").append(getQuerySegmentSpec());
  sb.append(", virtualColumns=").append(virtualColumns);
  sb.append(", limitSpec=").append(limitSpec);
  sb.append(", dimFilter=").append(dimFilter);
  sb.append(", granularity=").append(getGranularity());
  sb.append(", dimensions=").append(dimensions);
  sb.append(", aggregatorSpecs=").append(aggregatorSpecs);
  sb.append(", postAggregatorSpecs=").append(postAggregatorSpecs);
  sb.append(", havingSpec=").append(havingSpec);
  return sb.append('}').toString();
}
// NOTE(review): truncated snippet from merge-runner setup — resolves query-specific config
// overrides, copies the aggregator specs into an array, names a temp resource/dir with a
// random UUID plus the query id, and converts the filter to CNF via the query context.
// Interleaved pieces of several statements; not a compilable unit as shown.
final GroupByQueryConfig querySpecificConfig = config.withOverrides(query); final AggregatorFactory[] aggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()]; for (int i = 0; i < query.getAggregatorSpecs().size(); i++) { aggregatorFactories[i] = query.getAggregatorSpecs().get(i); StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()) ); final List<Interval> queryIntervals = query.getIntervals(); final Filter filter = Filters.convertToCNFFromQueryContext( query, Filters.toFilter(query.getDimFilter()) );
// NOTE(review): truncated snippet — same single-interval invariant as elsewhere in this file,
// followed by CNF filter conversion and cursor arguments. Not a compilable unit as shown.
final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals(); if (intervals.size() != 1) { throw new IAE("Should only have one interval, got[%s]", intervals); Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter())); query.getVirtualColumns(), query.getGranularity(), false, null
// NOTE(review): truncated snippet — optimizes the dim filter when present, then rewrites the
// delegate query's dimension specs for dimensions found in an optimized set before re-running.
// Not a compilable unit as shown.
if (groupByQuery.getDimFilter() != null) { groupByQuery = groupByQuery.withDimFilter(groupByQuery.getDimFilter().optimize()); for (DimensionSpec dimensionSpec : delegateGroupByQuery.getDimensions()) { if (optimizedDimensions.contains(dimensionSpec.getDimension())) { dimensionSpecs.add( queryPlus.withQuery(delegateGroupByQuery.withDimensionSpecs(dimensionSpecs)), responseContext );
// NOTE(review): truncated snippet — copies non-null entries of the outer query's context into
// a subquery context map, then applies them via withOverriddenContext. Not a compilable unit
// as shown.
final DataSource dataSource = query.getDataSource(); if (query.getContext() != null) { for (Map.Entry<String, Object> entry : query.getContext().entrySet()) { if (entry.getValue() != null) { subqueryContext.put(entry.getKey(), entry.getValue()); subquery.withOverriddenContext( ImmutableMap.<String, Object>of(
// NOTE(review): truncated snippet — single-interval invariant again, then legacy
// dimension-filter conversion and granularity passed to (elided) engine machinery.
// Not a compilable unit as shown.
final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals(); if (intervals.size() != 1) { throw new IAE("Should only have one interval, got[%s]", intervals); Filters.convertDimensionFilters(query.getDimFilter()), intervals.get(0), query.getGranularity() );
// NOTE(review): truncated snippet — reads the chained-execution context flag (default false),
// re-marks the query so nested runners use chained execution, builds combining aggregator
// factories, and names a temp resource with UUID + query id. Interleaved pieces of several
// statements; not a compilable unit as shown.
final boolean forceChainedExecution = query.getContextBoolean( CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, false final QueryPlus<Row> queryPlusForRunners = queryPlus .withQuery( query.withOverriddenContext(ImmutableMap.<String, Object>of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true)) final AggregatorFactory[] combiningAggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()]; for (int i = 0; i < query.getAggregatorSpecs().size(); i++) { combiningAggregatorFactories[i] = query.getAggregatorSpecs().get(i).getCombiningFactory(); StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()) );
// NOTE(review): truncated snippet from row-based grouper setup — derives dimension value
// types, wraps a row-based column selector factory in the query's virtual columns, computes
// limit-push-down applicability (and whether the sort touches non-grouping fields), builds
// key-serde factories sized by the merging dictionary budget, and sizes the grouper key by
// dimension count plus an optional timestamp slot. Interleaved pieces of several statements;
// not a compilable unit as shown.
final List<ValueType> valueTypes = DimensionHandlerUtils.getValueTypesFromDimensionSpecs(query.getDimensions()); final ColumnSelectorFactory columnSelectorFactory = query.getVirtualColumns().wrap( RowBasedColumnSelectorFactory.create( columnSelectorRow, final boolean willApplyLimitPushDown = query.isApplyLimitPushDown(); final DefaultLimitSpec limitSpec = willApplyLimitPushDown ? (DefaultLimitSpec) query.getLimitSpec() : null; boolean sortHasNonGroupingFields = false; if (willApplyLimitPushDown) { sortHasNonGroupingFields = DefaultLimitSpec.sortingOrderHasNonGroupingFields( limitSpec, query.getDimensions() ); query.getContextSortByDimsFirst(), query.getDimensions(), querySpecificConfig.getMaxMergingDictionarySize() / (concurrencyHint == -1 ? 1 : concurrencyHint), valueTypes, final Grouper.KeySerdeFactory<RowBasedKey> combineKeySerdeFactory = new RowBasedKeySerdeFactory( includeTimestamp, query.getContextSortByDimsFirst(), query.getDimensions(), querySpecificConfig.getMaxMergingDictionarySize(), // use entire dictionary space for combining key serde valueTypes, final int keySize = includeTimestamp ? query.getDimensions().size() + 1 : query.getDimensions().size(); final ValueExtractFunction valueExtractFn = makeValueExtractFunction(
/**
 * If "query" has a single universal timestamp, return it. Otherwise return null. This is useful
 * for keeping timestamps in sync across partial queries that may have different intervals.
 *
 * <p>An explicit fudge timestamp in the query context always takes precedence; otherwise a
 * universal timestamp exists only for {@code ALL} granularity, where every row falls into one
 * bucket starting at the first interval's start.
 *
 * @param query the query
 *
 * @return universal timestamp, or null
 */
public static DateTime getUniversalTimestamp(final GroupByQuery query)
{
  final String fudgeTimestamp = query.getContextValue(CTX_KEY_FUDGE_TIMESTAMP, "");
  if (!fudgeTimestamp.isEmpty()) {
    // Context override wins: the value is epoch millis encoded as a string.
    return DateTimes.utc(Long.parseLong(fudgeTimestamp));
  }

  final Granularity gran = query.getGranularity();
  if (!Granularities.ALL.equals(gran)) {
    // Finer granularities produce multiple buckets — no single universal timestamp.
    return null;
  }

  // ALL granularity: the single bucket's start is the universal timestamp.
  final DateTime timeStart = query.getIntervals().get(0).getStart();
  return gran.getIterable(new Interval(timeStart, timeStart.plus(1))).iterator().next().getStart();
}
/**
 * Records the query's aggregator count under the "numMetrics" dimension.
 */
@Override
public void numMetrics(GroupByQuery query)
{
  final int aggregatorCount = query.getAggregatorSpecs().size();
  setDimension("numMetrics", String.valueOf(aggregatorCount));
}