@Override
public Sequence<T> run(QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  // No-op runner: yields an empty result sequence for any query.
  return Sequences.empty();
} }
@Override
public Sequence<T> run(QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  // Record this runner's segment as missing so the caller can detect and retry it.
  // computeIfAbsent replaces the original get / null-check / put sequence: it
  // creates and installs the list atomically when the key is absent, and is the
  // idiomatic Map form for "get-or-create" initialization.
  @SuppressWarnings("unchecked")
  final List<SegmentDescriptor> missingSegments = (List<SegmentDescriptor>)
      responseContext.computeIfAbsent(Result.MISSING_SEGMENTS_KEY, key -> new ArrayList<>());
  missingSegments.add(descriptor);
  // Having reported the miss, produce no rows.
  return Sequences.empty();
} }
@Override
public Sequence run(QueryPlus queryPlus, Map responseContext)
{
  // Stub runner used where the actual results are irrelevant; always empty.
  return Sequences.empty();
} },
@Override
public Sequence<Object[]> runQuery()
{
  // Delegate to the filtered left-hand rel when one can be derived;
  // otherwise there is nothing to run, so the result is empty.
  final DruidRel<?> rel = getLeftRelWithFilter();
  return rel == null ? Sequences.empty() : rel.runQuery();
}
@Override @SuppressWarnings("unchecked") public Sequence<Object[]> runQuery() { // Lazy: run each query in sequence, not all at once. if (limit == 0) { return Sequences.empty(); } else { final Sequence baseSequence = Sequences.concat( FluentIterable.from(rels).transform(rel -> ((DruidRel) rel).runQuery()) ); return limit > 0 ? baseSequence.limit(limit) : baseSequence; } }
@Override public Sequence<Object[]> runQuery() { // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this // is the outermost query and it will actually get run as a native query. Druid's native query layer will // finalize aggregations for the outermost query even if we don't explicitly ask it to. final DruidQuery query = toDruidQuery(false); if (query != null) { return getQueryMaker().runQuery(query); } else { return Sequences.empty(); } }
@Override
public Sequence<Result<TimeseriesResultValue>> run(QueryPlus queryPlus, Map context)
{
  // Simulate a missing segment by appending a descriptor to the shared
  // response-context list, then return no results.
  final List missing = (List) context.get(Result.MISSING_SEGMENTS_KEY);
  missing.add(new SegmentDescriptor(Intervals.utc(178888, 1999999), "test", 1));
  return Sequences.empty();
} },
@Override
public Sequence<Result<TimeseriesResultValue>> run(
    QueryPlus<Result<TimeseriesResultValue>> queryPlus,
    Map<String, Object> context
)
{
  // Always reports one missing segment and yields nothing; the test harness
  // is expected to have seeded MISSING_SEGMENTS_KEY in the context already.
  final List missing = (List) context.get(Result.MISSING_SEGMENTS_KEY);
  missing.add(new SegmentDescriptor(Intervals.utc(178888, 1999999), "test", 1));
  return Sequences.empty();
} },
// NOTE(review): stitched fragment — braces are unbalanced here; the two
// excerpts below appear to come from different parts of the enclosing method.
// No timeline for the datasource means there are no segments to query.
TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource()); if (timeline == null) { return Sequences.empty();
// Etag short-circuit: when the freshly computed etag equals the one supplied
// by the caller, return empty — presumably the caller treats an unchanged
// etag as "your cached results are still valid" — TODO confirm upstream.
final String currentEtag = computeCurrentEtag(segments, queryCacheKey); if (currentEtag != null && currentEtag.equals(prevEtag)) { return Sequences.empty();
@Override
public Sequence<Cursor> makeCursors(
    @Nullable final Filter filter,
    final Interval interval,
    final VirtualColumns virtualColumns,
    final Granularity gran,
    final boolean descending,
    @Nullable QueryMetrics<?> queryMetrics
)
{
  // Nothing ingested yet: no cursors to produce.
  if (index.isEmpty()) {
    return Sequences.empty();
  }

  // Clip the requested interval to the span of data actually present.
  final Interval indexSpan = new Interval(getMinTime(), gran.bucketEnd(getMaxTime()));
  if (!interval.overlaps(indexSpan)) {
    return Sequences.empty();
  }
  final Interval clipped = interval.overlap(indexSpan);

  // One cursor per granularity bucket; reverse bucket order for descending scans.
  Iterable<Interval> buckets = gran.getIterable(clipped);
  if (descending) {
    buckets = Lists.reverse(ImmutableList.copyOf(buckets));
  }

  return Sequences
      .simple(buckets)
      .map(bucket -> new IncrementalIndexCursor(virtualColumns, descending, filter, bucket, clipped, gran));
}
@Test
public void testDefaultNoChunking()
{
  // With no chunkPeriod configured, the decorator must pass the query through
  // to the base runner exactly once, unchunked.
  QueryPlus queryPlus = QueryPlus.wrap(queryBuilder.intervals("2014/2016").build());

  // Collections.emptyMap() is the type-safe replacement for the raw-typed
  // EMPTY_MAP constant; it returns the same singleton instance, so the
  // EasyMock argument match is unaffected.
  EasyMock.expect(baseRunner.run(queryPlus, Collections.emptyMap())).andReturn(Sequences.empty());
  EasyMock.replay(baseRunner);

  QueryRunner runner = decorator.decorate(baseRunner, toolChest);
  runner.run(queryPlus, Collections.emptyMap());

  EasyMock.verify(baseRunner);
}
// NOTE(review): fragment — assumes CTX_COUNT was placed into the context by a
// prior runner; the unboxing cast throws NPE if the key is absent — TODO confirm.
// Once the running row count reaches the query's limit, emit nothing further.
long count = (long) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT); if (count >= query.getLimit()) { return Sequences.empty();
@Override
public Sequence<Result<TimeseriesResultValue>> run(
    QueryPlus<Result<TimeseriesResultValue>> queryPlus,
    Map<String, Object> context
)
{
  // Report a missing segment on each of the first three invocations; after
  // that, hand back a single empty-valued timeseries result.
  final int attempts = (int) context.get("count");
  if (attempts >= 3) {
    return Sequences.simple(
        Collections.singletonList(
            new Result<>(DateTimes.nowUtc(), new TimeseriesResultValue(new HashMap<>()))
        )
    );
  }
  ((List) context.get(Result.MISSING_SEGMENTS_KEY))
      .add(new SegmentDescriptor(Intervals.utc(178888, 1999999), "test", 1));
  context.put("count", attempts + 1);
  return Sequences.empty();
} },
/**
 * Execute the query. Can only be called if the query has been authorized. Note that query logs and metrics will
 * not be emitted automatically when the Sequence is fully iterated. It is the caller's responsibility to call
 * {@link #emitLogsAndMetrics(Throwable, String, long)} to emit logs and metrics.
 *
 * @return result sequence and response context
 */
public QueryResponse execute()
{
  transition(State.AUTHORIZED, State.EXECUTING);

  final Map<String, Object> responseContext = DirectDruidClient.makeResponseContextForQuery();

  Sequence res = QueryPlus.wrap(baseQuery)
                          .withIdentity(authenticationResult.getIdentity())
                          .run(texasRanger, responseContext);
  // Normalize a null sequence from the runner into an empty one.
  if (res == null) {
    res = Sequences.empty();
  }
  return new QueryResponse(res, responseContext);
}
@Override
public Sequence<Result<TimeseriesResultValue>> run(
    QueryPlus<Result<TimeseriesResultValue>> queryPlus,
    Map<String, Object> context
)
{
  // First invocation: pretend a segment is missing and yield nothing.
  // Every later invocation succeeds with one empty-valued result row.
  final boolean firstCall = (int) context.get("count") == 0;
  if (!firstCall) {
    return Sequences.simple(
        Collections.singletonList(
            new Result<>(DateTimes.nowUtc(), new TimeseriesResultValue(new HashMap<>()))
        )
    );
  }
  ((List) context.get(Result.MISSING_SEGMENTS_KEY))
      .add(new SegmentDescriptor(Intervals.utc(178888, 1999999), "test", 1));
  context.put("count", 1);
  return Sequences.empty();
} },
// NOTE(review): fragment — short-circuits the enclosing method (outside this
// view) with an empty result sequence.
return Sequences.empty();
// NOTE(review): fragment of an EasyMock expectation chain — the mocked call
// (set up on an earlier line outside this view) returns an empty sequence on
// every invocation; the next expectation targets getQueryRunner(lastServer).
.andReturn(Sequences.empty()) .anyTimes(); EasyMock.expect(serverView.getQueryRunner(lastServer))
@Override
public Sequence<T> run(QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  // Always-empty runner: produces no rows regardless of the query.
  return Sequences.empty();
} }
@Override
public Sequence<Object[]> runQuery()
{
  // Run the left-hand rel (with the filter pushed in) when it exists;
  // with no rel to delegate to, the result set is necessarily empty.
  final DruidRel<?> filteredLeft = getLeftRelWithFilter();
  if (filteredLeft != null) {
    return filteredLeft.runQuery();
  }
  return Sequences.empty();
}
@Override public Sequence<Object[]> runQuery() { // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this // is the outermost query and it will actually get run as a native query. Druid's native query layer will // finalize aggregations for the outermost query even if we don't explicitly ask it to. final DruidQuery query = toDruidQuery(false); if (query != null) { return getQueryMaker().runQuery(query); } else { return Sequences.empty(); } }