/**
 * Returns the priority declared in the query's context, falling back to
 * {@code DEFAULT_PRIORITY} when the context does not specify one.
 *
 * @param query the query whose context is inspected
 * @return the effective priority for {@code query}
 */
public static <T> int getPriority(Query<T> query)
{
  return getPriority(query, DEFAULT_PRIORITY);
}
/**
 * Applies the server-enforced timeout and scatter-gather byte limits to the query, then
 * stamps the query context with the absolute wall-clock instant at which it must fail.
 *
 * @param query        the incoming query
 * @param serverConfig source of the default/max timeout and scatter-gather byte cap
 * @return the query with limits applied and QUERY_FAIL_TIME set in its context
 */
public Query<T> withTimeoutAndMaxScatterGatherBytes(Query<T> query, ServerConfig serverConfig)
{
  // Inside-out: (1) default the timeout to min(defaultQueryTimeout, maxQueryTimeout) so the
  // default can never exceed the hard cap, (2) attach the scatter-gather byte limit,
  // (3) verify the (possibly user-supplied) timeout does not exceed maxQueryTimeout.
  Query<T> newQuery = QueryContexts.verifyMaxQueryTimeout(
      QueryContexts.withMaxScatterGatherBytes(
          QueryContexts.withDefaultTimeout(
              query,
              Math.min(serverConfig.getDefaultQueryTimeout(), serverConfig.getMaxQueryTimeout())
          ),
          serverConfig.getMaxScatterGatherBytes()
      ),
      serverConfig.getMaxQueryTimeout()
  );
  // QUERY_FAIL_TIME = start time + effective timeout: downstream servers read this as an
  // absolute deadline instead of re-deriving a relative timeout.
  return newQuery.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, this.startTimeMillis + QueryContexts.getTimeout(newQuery)));
}
}
/**
 * Returns the uncovered-intervals limit from the query's context, or
 * {@code DEFAULT_UNCOVERED_INTERVALS_LIMIT} when the context does not set one.
 *
 * @param query the query whose context is inspected
 * @return the effective uncovered-intervals limit for {@code query}
 */
public static <T> int getUncoveredIntervalsLimit(Query<T> query)
{
  return getUncoveredIntervalsLimit(query, DEFAULT_UNCOVERED_INTERVALS_LIMIT);
}
/**
 * Returns the timeout (in milliseconds) from the query's context, falling back to the
 * per-query default computed by {@code getDefaultTimeout}.
 *
 * @param query the query whose context is inspected
 * @return the effective timeout for {@code query}, in milliseconds
 */
public static <T> long getTimeout(Query<T> query)
{
  final long fallbackTimeout = getDefaultTimeout(query);
  return getTimeout(query, fallbackTimeout);
}
// Blocks on the backing future for the query's result sequence, honoring the query's
// timeout when one is configured, and rethrows any failure as an unchecked exception.
@Override
public Sequence<T> get()
{
  try {
    if (QueryContexts.hasTimeout(query)) {
      // Bounded wait: give up once the query's configured timeout (ms) elapses.
      return future.get(QueryContexts.getTimeout(query), TimeUnit.MILLISECONDS);
    } else {
      // No timeout configured: wait indefinitely for the result.
      return future.get();
    }
  } catch (ExecutionException | InterruptedException | TimeoutException ex) {
    // NOTE(review): Throwables.propagate is deprecated in newer Guava, and the thread's
    // interrupt flag is not restored on InterruptedException — consider revisiting.
    throw Throwables.propagate(ex);
  }
}
});
// When the query runs bySegment (or chained execution is forced), delegate to a
// ChainedExecutionQueryRunner instead of continuing with the merge path below.
if (QueryContexts.isBySegment(query) || forceChainedExecution) {
  ChainedExecutionQueryRunner<Row> runner = new ChainedExecutionQueryRunner<>(exec, queryWatcher, queryables);
  return runner.run(queryPlusForRunners, responseContext);
  // NOTE(review): the closing "}" of this if-block lies outside this excerpt; the ");"
  // below closes an enclosing construct that also begins outside this excerpt.
);
final int priority = QueryContexts.getPriority(query);
final long queryTimeout = QueryContexts.getTimeout(query);
final boolean hasTimeout = QueryContexts.hasTimeout(query);
// NOTE(review): when hasTimeout is false, queryTimeout is presumably a sentinel, making
// timeoutAt meaningless — confirm it is only read when hasTimeout is true.
final long timeoutAt = System.currentTimeMillis() + queryTimeout;
boolean isBySegment = QueryContexts.isBySegment(query);
// Cap on total bytes gathered across servers for this query.
final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
// Shared accumulator stored in the response context, tracking bytes gathered so far.
final AtomicLong totalBytesGathered = (AtomicLong) context.get(QUERY_TOTAL_BYTES_GATHERED);
// A positive maxQueuedBytes enables backpressure on the channel (default 0 = disabled).
final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
final boolean usingBackpressure = maxQueuedBytes > 0;
// NOTE(review): the lines below are the tail of a request-builder expression whose start
// is outside this excerpt; the query is serialized with its remaining timeLeft applied.
HttpMethod.POST,
new URL(url)
).setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
    .setHeader(
        HttpHeaders.Names.CONTENT_TYPE,
/**
 * Returns whether the query requests bySegment results, falling back to
 * {@code DEFAULT_BY_SEGMENT} when the context does not specify.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when bySegment results are requested
 */
public static <T> boolean isBySegment(Query<T> query)
{
  return isBySegment(query, DEFAULT_BY_SEGMENT);
}
/**
 * Captures per-query state derived from the query plus and broker config: tool chest,
 * cache strategy and flags, bySegment mode, uncovered-intervals limit, the downstream
 * query (with overridden context), and the innermost query's intervals.
 *
 * @param queryPlus       the query plus its lane/metrics wrapper
 * @param responseContext mutable context shared across the query's execution
 */
SpecificQueryRunnable(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  this.queryPlus = queryPlus;
  this.responseContext = responseContext;
  this.query = queryPlus.getQuery();
  this.toolChest = warehouse.getToolChest(query);
  this.strategy = toolChest.getCacheStrategy(query);
  // Cache participation is decided per-query from the strategy and broker cache config.
  this.useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
  this.populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
  this.isBySegment = QueryContexts.isBySegment(query);
  // Note that enabling this leads to putting uncovered intervals information in the response headers
  // and might blow up in some cases https://github.com/apache/incubator-druid/issues/2108
  this.uncoveredIntervalsLimit = QueryContexts.getUncoveredIntervalsLimit(query);
  this.downstreamQuery = query.withOverriddenContext(makeDownstreamQueryContext());
  // For nested queries, we need to look at the intervals of the inner most query.
  this.intervals = query.getIntervalsOfInnerMostQuery();
}
// Whether results are returned per-segment rather than merged.
final boolean isBySegment = QueryContexts.isBySegment(query);
// Finalize aggregations unless the query context explicitly opts out (default true).
final boolean shouldFinalize = QueryContexts.isFinalize(query, true);
// NOTE(review): the leading ");" closes an expression that begins outside this excerpt.
);
// Accumulator pair used to collect per-segment results when bySegment is enabled.
final Pair<Queue, Accumulator<Queue, T>> bySegmentAccumulatorPair = GroupByQueryHelper.createBySegmentAccumulatorPair();
final boolean bySegment = QueryContexts.isBySegment(query);
final int priority = QueryContexts.getPriority(query);
// Strip thread-unsafe state before handing the query to worker threads.
final QueryPlus<T> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
final ListenableFuture<List<Void>> futures = Futures.allAsList(
boolean shouldFinalize = QueryContexts.isFinalize(query, true);
// DateTimes are serialized as longs either when explicitly requested, or when results
// are not finalized and the "inner" long-serialization flag is set (broker-to-data-node).
boolean serializeDateTimeAsLong = QueryContexts.isSerializeDateTimeAsLong(query, false)
    || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));
final ObjectWriter jsonWriter = context.newOutputWriter(serializeDateTimeAsLong);
// NOTE(review): the builder statement below continues past the end of this excerpt.
Response.ResponseBuilder builder = Response
/**
 * Returns whether the query has an effective timeout, i.e. whether {@code getTimeout}
 * resolves to something other than the {@code NO_TIMEOUT} sentinel.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when a timeout applies to {@code query}
 */
public static <T> boolean hasTimeout(Query<T> query)
{
  final long timeout = getTimeout(query);
  return timeout != NO_TIMEOUT;
}
/**
 * Returns whether result-level cache population is enabled for the query, falling back
 * to {@code DEFAULT_POPULATE_RESULTLEVEL_CACHE} when the context does not specify.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when the result-level cache should be populated
 */
public static <T> boolean isPopulateResultLevelCache(Query<T> query)
{
  return isPopulateResultLevelCache(query, DEFAULT_POPULATE_RESULTLEVEL_CACHE);
}
/**
 * Returns whether reading from the segment-level cache is enabled for the query, falling
 * back to {@code DEFAULT_USE_CACHE} when the context does not specify.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when the segment-level cache may be read
 */
public static <T> boolean isUseCache(Query<T> query)
{
  return isUseCache(query, DEFAULT_USE_CACHE);
}
/**
 * Returns whether segment-level cache population is enabled for the query, falling back
 * to {@code DEFAULT_POPULATE_CACHE} when the context does not specify.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when the segment-level cache should be populated
 */
public static <T> boolean isPopulateCache(Query<T> query)
{
  return isPopulateCache(query, DEFAULT_POPULATE_CACHE);
}
/**
 * Returns whether reading from the result-level cache is enabled for the query, falling
 * back to {@code DEFAULT_USE_RESULTLEVEL_CACHE} when the context does not specify.
 *
 * @param query the query whose context is inspected
 * @return {@code true} when the result-level cache may be read
 */
public static <T> boolean isUseResultLevelCache(Query<T> query)
{
  return isUseResultLevelCache(query, DEFAULT_USE_RESULTLEVEL_CACHE);
}
/**
 * Applies finalizing metric manipulation to the sub-query's rows when the sub-query's
 * "finalize" context flag is set (default {@code false}); otherwise returns the rows
 * untouched.
 *
 * @param subqueryResult the rows produced by the sub-query
 * @param subquery       the sub-query whose context controls finalization
 * @return the (possibly finalized) row sequence
 */
private Sequence<Row> finalizeSubqueryResults(Sequence<Row> subqueryResult, GroupByQuery subquery)
{
  if (!QueryContexts.isFinalize(subquery, false)) {
    // Finalization not requested: pass the rows through unchanged.
    return subqueryResult;
  }
  // Map each row through the finalizing manipulator for this sub-query.
  return new MappedSequence<>(
      subqueryResult,
      makePreComputeManipulatorFn(subquery, MetricManipulatorFns.finalizing())::apply
  );
}
// Verifies that verifyMaxQueryTimeout rejects a query whose context timeout (1000 ms)
// exceeds the enforced maxQueryTimeout (100 ms) with an IAE carrying the expected message.
@Test
public void testQueryMaxTimeout()
{
  // ExpectedException rule: the expectations must be registered before the throwing call.
  exception.expect(IAE.class);
  exception.expectMessage("configured [timeout = 1000] is more than enforced limit of maxQueryTimeout [100].");
  Query<?> query = new TestQuery(
      new TableDataSource("test"),
      new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("0/100"))),
      false,
      ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 1000)
  );
  QueryContexts.verifyMaxQueryTimeout(query, 100);
}
// NOTE(review): this excerpt duplicates an identical fragment earlier in this file —
// confirm whether one copy is redundant.
// When the query runs bySegment (or chained execution is forced), delegate to a
// ChainedExecutionQueryRunner instead of continuing with the merge path below.
if (QueryContexts.isBySegment(query) || forceChainedExecution) {
  ChainedExecutionQueryRunner<Row> runner = new ChainedExecutionQueryRunner<>(exec, queryWatcher, queryables);
  return runner.run(queryPlusForRunners, responseContext);
  // NOTE(review): the closing "}" of this if-block lies outside this excerpt; the ");"
  // below closes an enclosing construct that also begins outside this excerpt.
);
final int priority = QueryContexts.getPriority(query);
final long queryTimeout = QueryContexts.getTimeout(query);
final boolean hasTimeout = QueryContexts.hasTimeout(query);
// NOTE(review): when hasTimeout is false, queryTimeout is presumably a sentinel, making
// timeoutAt meaningless — confirm it is only read when hasTimeout is true.
final long timeoutAt = System.currentTimeMillis() + queryTimeout;