@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // This handler adds nothing of its own; it simply forwards the request
    // unchanged to the next handler in the chain.
    return next.handleRequest(context, request, druidQuery, response);
}
}
@Override public boolean handleRequest( RequestContext context, DataApiRequest request, DruidAggregationQuery<?> druidQuery, ResponseProcessor response ) { if (request.getTopN().isPresent() && !(druidQuery instanceof TopNQuery)) { TopNResultSetMapper mapper = new TopNResultSetMapper(request.getTopN().getAsInt()); // Add topN mapper after partial data mapper and before any other mapper try { // Index is 1 because we assume that partial data result set mapper has been injected already // by the PartialDataRequestHandler ((MappingResponseProcessor) response).getMappers().add(1, mapper); } catch (ClassCastException cce) { throw new IllegalStateException("TopN request handler requires a mapping response processor.", cce); } } return next.handleRequest(context, request, druidQuery, response); } }
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Guard the cast explicitly, matching the pagination and volatile-data handlers:
    // a descriptive IllegalStateException beats an opaque ClassCastException.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Date time sort request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;
    // Append a sorting mapper only when the request asked for a dateTime sort.
    if (request.getDateTimeSort().isPresent()) {
        mappingResponse.getMappers().add(new DateTimeSortMapper(request.getDateTimeSort().get().getDirection()));
    }
    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
}
@Override
public boolean handleRequest(
        final RequestContext context,
        final DataApiRequest request,
        final DruidAggregationQuery<?> druidQuery,
        final ResponseProcessor response
) {
    // DEBUG format short-circuits the chain: report the (debug) payload via the
    // error callback with an HTTP 200 instead of sending the query to Druid.
    if (request.getFormat() == DefaultResponseFormatType.DEBUG) {
        response.getErrorCallback(druidQuery).dispatch(200, request.getFormat().toString(), "DEBUG");
        return true;
    }
    // Any other format: delegate to the next handler in the chain.
    return next.handleRequest(context, request, druidQuery, response);
}
}
next.handleRequest(context, request, druidQuery, response); } catch (Throwable e) { LOG.info("Exception processing druid call in success", e);
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Pagination is implemented as a result-set mapper, so the response must support mappers.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Pagination request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Only paginate when the request actually carries pagination parameters.
    request.getPaginationParameters().ifPresent(
            paginationParameters -> mappingResponse.getMappers().add(
                    new PaginationMapper(paginationParameters, mappingResponse, context.getUriBuilder())
            )
    );

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
}
return next.handleRequest(context, request, druidQuery, response); q -> { RequestLog.restore(logCtx); next.handleRequest(context, request, q, mergingResponse);
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Wrap the downstream response so the uncovered-intervals report from Druid is inspected.
    ResponseProcessor partialDataResponse = new DruidPartialDataResponseProcessor(response);

    // Attach the configured uncovered-intervals limit to the query context so Druid
    // reports intervals it could not cover.
    return next.handleRequest(
            context,
            request,
            druidQuery.withContext(
                    druidQuery.getContext().withUncoveredIntervalsLimit(druidUncoveredIntervalLimit)
            ),
            partialDataResponse
    );
}
}
); return next.handleRequest(context, request, druidQuery, nextResponse);
boolean complete = dataRequestHandler.handleRequest(context, apiRequest, druidQuery, responseProcessor); if (!complete) { throw new IllegalStateException("No request handler accepted request.");
return next.handleRequest(context, request, druidQuery, nextResponse);
); return next.handleRequest(context, request, druidQuery, nextResponse);
return next.handleRequest(context, request, druidQuery, response);
return next.handleRequest(context, request, druidQuery, response);
return next.handleRequest(context, request, druidQuery, mappingResponse);
@Override public boolean handleRequest( RequestContext context, DataApiRequest request, DruidAggregationQuery<?> druidQuery, ResponseProcessor response ) { if (!(response instanceof MappingResponseProcessor)) { throw new IllegalStateException("Volatile data request handler requires a mapping response."); } MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response; // Gather the volatile intervals. A volatile interval in one data source make that interval volatile overall. SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals( druidQuery.getGranularity(), druidQuery.getIntervals(), physicalTableDictionary.get( druidQuery .getInnermostQuery() .getDataSource() .getPhysicalTable() .getName() ) ); if (!volatileIntervals.isEmpty()) { ResponseContext responseContext = response.getResponseContext(); responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals); } return next.handleRequest(context, request, druidQuery, mappingResponse); }