/**
 * Constructor.
 *
 * @param webService  UI Web Service
 * @param webServiceNext  Handler for the UI path
 */
public DefaultWebServiceHandlerSelector(DruidWebService webService, DataRequestHandler webServiceNext) {
    // Pair the single web service with its downstream handler.
    this.webServiceHandler = new WebServiceHandler(webService, webServiceNext);
}
/**
 * Constructor.
 *
 * @param next  Next Handler in the chain
 * @param mapper  The mapper for all JSON processing
 */
public SqlRequestHandler(DataRequestHandler next, ObjectMapper mapper) {
    this.next = next;
    // NOTE(review): initializeSqlBackend is called from the constructor; if that method is
    // overridable, a subclass override would run against a partially constructed instance —
    // confirm it is private/final.
    initializeSqlBackend(mapper);
}
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Pass the request straight down the handler chain unchanged.
    boolean handled = next.handleRequest(context, request, druidQuery, response);
    return handled;
}
}
DataRequestHandler handler = new AsyncWebServiceRequestHandler(webService, mapper); handler = new DruidPartialDataRequestHandler(handler); handler = new CacheRequestHandler(handler, dataCache, mapper); } else if (CacheFeatureFlag.LOCAL_SIGNATURE.isOn()) { handler = new CacheV2RequestHandler(handler, dataCache, querySigningService, mapper); } else if (CacheFeatureFlag.ETAG.isOn()) { handler = new EtagCacheRequestHandler( handler, (TupleDataCache<String, String, String>) dataCache, handler = new SplitQueryRequestHandler(handler); handler = new WeightCheckRequestHandler(handler, webService, weightUtil, mapper); handler = new DebugRequestHandler(handler, mapper); handler = new WebServiceSelectorRequestHandler( webService, handler, handler = new SqlRequestHandler(handler, mapper); handler = new PaginationRequestHandler(handler); handler = new DateTimeSortRequestHandler(handler); handler = new TopNMapperRequestHandler(handler);
cacheKey = getKey(druidQuery); if (context.isReadCache()) { final String jsonResult = dataCache.get(cacheKey); CACHE_REQUESTS.mark(1); if (context.getNumberOfOutgoing().decrementAndGet() == 0) { RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER); if (context.getNumberOfIncoming().decrementAndGet() == 0) { RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER); ); return next.handleRequest(context, request, druidQuery, nextResponse);
return next.handleRequest(context, request, druidQuery, response); final SuccessCallback weightQuerySuccess = buildSuccessCallback( context, request,
@Override public boolean handleRequest( RequestContext context, DataApiRequest request, DruidAggregationQuery<?> druidQuery, ResponseProcessor response ) { WebServiceHandler handler = handlerSelector.select(druidQuery, request, context); // Add a timeout to the query if the selected webService is configured with one Integer timeout = handler.getWebService().getTimeout(); // Add a priority to the query if there is one configured Integer priority = handler.getWebService().getServiceConfig().getPriority(); QueryContext qc = druidQuery.getContext().withTimeout(timeout).withPriority(priority); if (!qc.isEmpty()) { druidQuery = druidQuery.withContext(qc); } return handler.handleRequest(context, request, druidQuery, response); } }
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Pagination is applied via a response mapper, so a mapping-capable processor is required.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Pagination request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // If the request asked for a page, attach a mapper that trims the response to that page.
    request.getPaginationParameters().ifPresent(paginationParameters ->
            mappingResponse.getMappers().add(
                    new PaginationMapper(paginationParameters, mappingResponse, context.getUriBuilder())
            )
    );

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
}
/**
 * A request is cacheable if it does not refer to partial data.
 *
 * @return whether request can be cached
 */
private boolean isCacheable() {
    // Cache only when there is neither missing nor volatile data in the response context.
    return getPartialIntervalsWithDefault(getResponseContext()).isEmpty()
            && getVolatileIntervalsWithDefault(getResponseContext()).isEmpty();
}
}
/**
 * Builds an error Response from an exception when no Druid query is involved.
 *
 * @param status  the response status
 * @param cause  exception
 * @param writer  The Writer to use for writing errors
 *
 * @return Response
 */
public static javax.ws.rs.core.Response makeErrorResponse(StatusType status, Throwable cause, ObjectWriter writer) {
    // Delegate to the fuller overload, passing null in the query/group-by slot.
    return makeErrorResponse(status, null, cause, writer);
}
/**
 * Constructor.
 *
 * @param webService  UI Web Service
 * @param webserviceNext  Handler for the UI path
 * @param mapper  Mapper to use when processing JSON
 */
public WebServiceSelectorRequestHandler(
        DruidWebService webService,
        DataRequestHandler webserviceNext,
        ObjectMapper mapper
) {
    // Wrap the single web service in the default selector and delegate to the main constructor.
    this(new DefaultWebServiceHandlerSelector(webService, webserviceNext), mapper);
}
@Override public void query(Dimension dimension, DataSource dataSource) { // Success callback will update the dimension cache SuccessCallback success = buildDruidDimensionsSuccessCallback(dimension); DruidSearchQuery druidSearchQuery = new DruidSearchQuery( dataSource, AllGranularity.INSTANCE, null, Collections.singletonList(INTERVAL), Collections.singletonList(dimension), SEARCH_QUERY_SPEC, null, ROW_LIMIT ); RequestContext requestContext = new RequestContext(null, false); druidWebService.postDruidQuery( requestContext, success, errorCallback, failureCallback, druidSearchQuery ); }
DataRequestHandler handler = new AsyncWebServiceRequestHandler(webService, mapper); handler = new DruidPartialDataRequestHandler(handler); handler = new CacheRequestHandler(handler, dataCache, mapper); } else if (CacheFeatureFlag.LOCAL_SIGNATURE.isOn()) { handler = new CacheV2RequestHandler(handler, dataCache, querySigningService, mapper); } else if (CacheFeatureFlag.ETAG.isOn()) { handler = new EtagCacheRequestHandler( handler, (TupleDataCache<String, String, String>) dataCache, handler = new SplitQueryRequestHandler(handler); handler = new WeightCheckRequestHandler(handler, webService, weightUtil, mapper); handler = new DebugRequestHandler(handler, mapper); handler = new WebServiceSelectorRequestHandler( webService, handler, handler = new PaginationRequestHandler(handler); handler = new DateTimeSortRequestHandler(handler); handler = new TopNMapperRequestHandler(handler); handler = new PartialDataRequestHandler(handler, partialDataHandler);
@Override
public boolean handleRequest(
        final RequestContext context,
        final DataApiRequest request,
        final DruidAggregationQuery<?> druidQuery,
        final ResponseProcessor response
) {
    // DEBUG-format requests short-circuit: report via the error callback instead of
    // forwarding the query down the chain.
    if (request.getFormat() == DefaultResponseFormatType.DEBUG) {
        response.getErrorCallback(druidQuery).dispatch(200, request.getFormat().toString(), "DEBUG");
        return true;
    }
    return next.handleRequest(context, request, druidQuery, response);
}
}
/**
 * A request is cacheable if it does not refer to partial data.
 *
 * @return whether request can be cached
 */
protected boolean isCacheable() {
    // Both checks are evaluated so the response context is inspected for missing
    // and volatile intervals alike.
    boolean hasMissingData = !getPartialIntervalsWithDefault(getResponseContext()).isEmpty();
    boolean hasVolatileData = !getVolatileIntervalsWithDefault(getResponseContext()).isEmpty();
    return !(hasMissingData || hasVolatileData);
}
}
/**
 * Prepare Response object from error details with reason and description.
 *
 * @param statusCode  Error status code
 * @param reason  Brief reason about the error
 * @param description  Description of the error
 * @param druidQuery  Druid query associated with the error
 *
 * @return Publishable Response object
 */
public javax.ws.rs.core.Response buildErrorResponse(
        int statusCode,
        String reason,
        String description,
        DruidQuery<?> druidQuery
) {
    // Hand the details to the shared error-response builder, serializing with this
    // resource's configured object mapper.
    return RequestHandlerUtils.makeErrorResponse(
            statusCode,
            reason,
            description,
            druidQuery,
            objectMappers.getMapper().writer()
    );
}
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Fail fast with a descriptive error instead of an implicit ClassCastException,
    // matching the guard used by the pagination request handler.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Date time sort request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // When the request asks for a dateTime sort, attach a mapper that reorders the
    // result set in the requested direction.
    if (request.getDateTimeSort().isPresent()) {
        mappingResponse.getMappers().add(new DateTimeSortMapper(request.getDateTimeSort().get().getDirection()));
    }

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
}
@Override
public void handleThrowable(
        Throwable e,
        AsyncResponse asyncResponse,
        Optional<DataApiRequest> apiRequest,
        ContainerRequestContext containerRequestContext,
        ObjectWriter writer
) {
    if (e instanceof RequestValidationException) {
        // Validation failures carry their own status; log quietly at debug level.
        LOG.debug(e.getMessage(), e);
        RequestValidationException validationException = (RequestValidationException) e;
        asyncResponse.resume(
                RequestHandlerUtils.makeErrorResponse(validationException.getStatus(), validationException, writer)
        );
    } else {
        // All remaining cases share the same log line; only the HTTP status differs.
        LOG.info("Exception processing request", e);
        if (e instanceof NoMatchFoundException) {
            asyncResponse.resume(RequestHandlerUtils.makeErrorResponse(INTERNAL_SERVER_ERROR, e, writer));
        } else if (e instanceof TimeoutException) {
            asyncResponse.resume(RequestHandlerUtils.makeErrorResponse(GATEWAY_TIMEOUT, e, writer));
        } else {
            asyncResponse.resume(RequestHandlerUtils.makeErrorResponse(BAD_REQUEST, e, writer));
        }
    }
}
}
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Ask Druid to report up to the configured number of uncovered intervals on this query.
    DruidAggregationQuery<?> augmentedQuery = druidQuery.withContext(
            druidQuery.getContext().withUncoveredIntervalsLimit(druidUncoveredIntervalLimit)
    );

    // Wrap the response processor so the uncovered-interval information is handled downstream.
    return next.handleRequest(context, request, augmentedQuery, new DruidPartialDataResponseProcessor(response));
}
}
/**
 * Prepare Response object from error details with reason and description.
 *
 * @param responseException  The error that needs to be transmitted to the user
 *
 * @return Publishable Response object
 */
private javax.ws.rs.core.Response buildErrorResponse(ResponseException responseException) {
    // Unpack the exception's status, reason, description, and query, and hand them to
    // the shared error-response builder.
    return RequestHandlerUtils.makeErrorResponse(
            responseException.getStatusCode(),
            responseException.getReason(),
            responseException.getDescription(),
            responseException.getDruidQuery(),
            writer
    );
}
}