@Override public void query(Dimension dimension, DataSource dataSource) { // Success callback will update the dimension cache SuccessCallback success = buildDruidDimensionsSuccessCallback(dimension); DruidSearchQuery druidSearchQuery = new DruidSearchQuery( dataSource, AllGranularity.INSTANCE, null, Collections.singletonList(INTERVAL), Collections.singletonList(dimension), SEARCH_QUERY_SPEC, null, ROW_LIMIT ); RequestContext requestContext = new RequestContext(null, false); druidWebService.postDruidQuery( requestContext, success, errorCallback, failureCallback, druidSearchQuery ); }
// NOTE(review): truncated fragment — the enclosing method is not visible here, several
// closing braces appear to have been lost in this excerpt, and the else-branch below is
// cut off mid-argument-list. Code tokens are unchanged; only line breaks and comments added.
// On a cache-read request: look up this query's entry in the (presumably etag-keyed) data
// cache and record the stored value — or a sentinel for "not cached" — in the request
// headers under eTagInRequest before the query is sent downstream.
if (context.isReadCache()) {
    cacheKey = getKey(druidQuery);
    final TupleDataCache.DataEntry<String, String , String> cacheEntry = dataCache.get(cacheKey);
    if (cacheEntry != null) {
        // Current query is in data cache
        context.getHeaders().putSingle(
                eTagInRequest,
                mapper.readTree(cacheEntry.getValue())
        );
        if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
            // Last outgoing sub-query: the request workflow is complete.
            RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
            if (context.getNumberOfIncoming().decrementAndGet() == 0) {
                RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
            } else {
                // Current query is not in data cache
                context.getHeaders().putSingle(
                        eTagInRequest,
                        DruidJsonRequestContentKeys.NON_EXISTING_ETAG_VALUE.getName()
/**
 * Adds a pagination mapper to the response when the request carries pagination parameters,
 * then delegates to the next handler in the chain.
 *
 * @throws IllegalStateException if the response is not a MappingResponseProcessor, since
 *         pagination is applied through the response's mapper list.
 */
@Override
public boolean handleRequest(
        RequestContext context,
        DataApiRequest request,
        DruidAggregationQuery<?> druidQuery,
        ResponseProcessor response
) {
    // Pagination is implemented as a response mapper, so a mapping-capable processor is required.
    if (!(response instanceof MappingResponseProcessor)) {
        throw new IllegalStateException("Pagination request handler requires a mapping response.");
    }
    MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response;

    // Only paginate when the request explicitly asked for a page.
    request.getPaginationParameters().ifPresent(
            paginationParameters -> mappingResponse.getMappers().add(
                    new PaginationMapper(
                            paginationParameters,
                            mappingResponse,
                            context.getUriBuilder()
                    )
            )
    );

    return next.handleRequest(context, request, druidQuery, mappingResponse);
}
}
!context.getNumberOfIncoming().compareAndSet(1, numberOfIntervals) || !context.getNumberOfOutgoing().compareAndSet(1, numberOfIntervals) ) { String msg = "Number of sub-queries not equal to \"one\" before query splitting. Possible race condition."; msg += "Incoming: " + context.getNumberOfIncoming().get(); msg += ".Outgoing: " + context.getNumberOfIncoming().get(); LOG.error(msg); throw new IllegalStateException(msg);
// NOTE(review): truncated fragment — enclosing method not visible; braces do not balance
// within this excerpt. Code tokens unchanged; only line breaks and comments added.
// Cache-read path: fetch the cached JSON result for this query's key and count the lookup.
cacheKey = getKey(druidQuery);
if (context.isReadCache()) {
    final String jsonResult = dataCache.get(cacheKey);
    // Every read attempt is counted, hit or miss.
    CACHE_REQUESTS.mark(1);
    if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
        // Last outgoing sub-query: the request workflow is complete.
        RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
        if (context.getNumberOfIncoming().decrementAndGet() == 0) {
            RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
// NOTE(review): truncated fragment — a condition between "CACHE_REQUESTS.mark(1);" and the
// dangling ") {" was lost in this excerpt, and the braces do not balance. Code tokens
// unchanged; only line breaks and comments added.
// Cache-read path against the tuple data cache (key, Long metadata, String value): look up
// this query's entry and count the lookup before the (missing) condition decides whether
// the cached value is usable.
cacheKey = getKey(druidQuery);
if (context.isReadCache()) {
    final TupleDataCache.DataEntry<String, Long, String> cacheEntry = dataCache.get(cacheKey);
    CACHE_REQUESTS.mark(1);
) {
    try {
        if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
            // Last outgoing sub-query: the request workflow is complete.
            RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
            if (context.getNumberOfIncoming().decrementAndGet() == 0) {
                RequestLog.startTiming(RESPONSE_WORKFLOW_TIMER);
// NOTE(review): truncated fragment — enclosing method not visible and the else-branch is
// cut off. Code tokens unchanged; only line breaks and comments added.
// Per-sub-query accounting: when the last outgoing sub-query finishes, stop the
// request-workflow timer, capture the incoming-response counter as the outstanding count,
// and name this query's timer by its sequence number.
if (context.getNumberOfOutgoing().decrementAndGet() == 0) {
    RequestLog.stopTiming(REQUEST_WORKFLOW_TIMER);
    outstanding = context.getNumberOfIncoming();
    // Timer name is DRUID_QUERY_TIMER plus the formatted sequence number of this sub-query.
    timerName = DRUID_QUERY_TIMER + String.format(format, seqNum);
} else {
// Create the request context from the incoming container request and the cache-read flag.
context = new RequestContext(containerRequestContext, readCache);