/**
 * Checks and invokes error if the number of missing intervals are overflowed, i.e. more than the configured limit.
 *
 * @param json The json object containing the overflow flag
 * @param query The query with the schema for processing this response
 */
private void checkOverflow(JsonNode json, DruidAggregationQuery<?> query) {
    // The caller validates that both keys are present before invoking this method,
    // so the chained lookups below are safe here.
    JsonNode responseContext = json.get(DruidJsonResponseContentKeys.DRUID_RESPONSE_CONTEXT.getName());
    boolean overflowed = responseContext
            .get(DruidJsonResponseContentKeys.UNCOVERED_INTERVALS_OVERFLOWED.getName())
            .asBoolean();

    if (overflowed) {
        // Surface the configured limit in the error so callers know the threshold that was exceeded.
        String message = ErrorMessageFormat.TOO_MANY_INTERVALS_MISSING.format(
                query.getContext().getUncoveredIntervalsLimit()
        );
        logAndGetErrorCallback(message, query);
    }
}
// NOTE(review): partial excerpt of an etag-cache response handler — the enclosing method signature
// and several intervening lines are missing from this view (braces do not balance: the guard's `}`
// after `return;`, the `try` body's closing brace, and the call owning the orphaned
// CACHED_RESPONSE argument list are all elsewhere). Do not edit without the full file.
// Visible intent: bail out with an error callback if STATUS_CODE is absent; otherwise splice the
// cached payload under RESPONSE, warn (but continue) when ETAG is absent, re-cache keyed by the
// etag when present, and on 304 NOT_MODIFIED mark the response as served from cache before
// delegating to `next.processResponse`. — TODO confirm against the complete method.
if (!json.has(DruidJsonResponseContentKeys.STATUS_CODE.getName())) { logAndGetErrorCallback(ErrorMessageFormat.STATUS_CODE_MISSING_FROM_RESPONSE.format(), druidQuery); return; int statusCode = json.get(DruidJsonResponseContentKeys.STATUS_CODE.getName()).asInt(); try { ((ObjectNode) json).set( DruidJsonResponseContentKeys.RESPONSE.getName(), mapper.readTree(dataCache.getDataValue(cacheKey)) ); if (!json.has(DruidJsonResponseContentKeys.ETAG.getName())) { LOG.warn(ErrorMessageFormat.ETAG_MISSING_FROM_RESPONSE.format()); } else { dataCache.set( cacheKey, json.get(DruidJsonResponseContentKeys.ETAG.getName()).asText(), writer.writeValueAsString(json.get(DruidJsonResponseContentKeys.RESPONSE.getName())) ); } catch (JsonProcessingException exception) { DruidJsonResponseContentKeys.CACHED_RESPONSE.getName(), statusCode == NOT_MODIFIED.getStatusCode() ); next.processResponse(json, druidQuery, metadata); } else {
// NOTE(review): partial excerpt — both `createParser(response.getHeader(...))` chains are truncated
// mid-expression (no `.readValueAsTree()`/closing parens visible) and the enclosing method
// signature is absent. Do not edit without the full file.
// Visible intent: wrap the base-strategy result in an envelope ObjectNode together with the
// X-Druid-Response-Context header (parsed as JSON), the numeric HTTP status code, and — when the
// etag cache feature flag is on and the status is 200 OK — the response's ETag header.
// — TODO confirm the truncated tail against the complete method.
MappingJsonFactory mappingJsonFactory = new MappingJsonFactory(); ObjectNode objectNode = JsonNodeFactory.instance.objectNode(); objectNode.set(DruidJsonResponseContentKeys.RESPONSE.getName(), baseStrategy.apply(response)); try { objectNode.set( DruidJsonResponseContentKeys.DRUID_RESPONSE_CONTEXT.getName(), mappingJsonFactory .createParser( response.getHeader(DruidJsonResponseContentKeys.DRUID_RESPONSE_CONTEXT.getName()) int statusCode = response.getStatusCode(); objectNode.set( DruidJsonResponseContentKeys.STATUS_CODE.getName(), mappingJsonFactory.createParser(String.valueOf(statusCode)).readValueAsTree() ); if (CacheFeatureFlag.ETAG.isOn() && statusCode == OK.getStatusCode()) { objectNode.set( DruidJsonResponseContentKeys.ETAG.getName(), mappingJsonFactory .createParser( response.getHeader(DruidJsonResponseContentKeys.ETAG.getName())
// NOTE(review): partial excerpt of a response-validation routine — each guard's closing `}` and
// parts of the logAndGetErrorCallback argument lists are missing from this view, and the chunk is
// truncated at the end. Do not edit without the full file.
// Visible intent: sequentially verify that the response JSON carries DRUID_RESPONSE_CONTEXT,
// that the context carries UNCOVERED_INTERVALS and UNCOVERED_INTERVALS_OVERFLOWED, and that the
// top level carries STATUS_CODE — invoking the error callback and returning early on the first
// missing key. This validation is presumably what makes the chained lookups in checkOverflow
// safe — TODO confirm call order against the complete class.
if (!json.has(DruidJsonResponseContentKeys.DRUID_RESPONSE_CONTEXT.getName())) { logAndGetErrorCallback(ErrorMessageFormat.DRUID_RESPONSE_CONTEXT_MISSING_FROM_RESPONSE.format(), query); return; JsonNode druidResponseContext = json.get(DruidJsonResponseContentKeys.DRUID_RESPONSE_CONTEXT.getName()); if (!druidResponseContext.has(DruidJsonResponseContentKeys.UNCOVERED_INTERVALS.getName())) { logAndGetErrorCallback( ErrorMessageFormat.UNCOVERED_INTERVALS_MISSING_FROM_RESPONSE.format(), return; if (!druidResponseContext.has(DruidJsonResponseContentKeys.UNCOVERED_INTERVALS_OVERFLOWED.getName())) { logAndGetErrorCallback( ErrorMessageFormat.UNCOVERED_INTERVALS_OVERFLOWED_MISSING_FROM_RESPONSE.format(), return; if (!json.has(DruidJsonResponseContentKeys.STATUS_CODE.getName())) { logAndGetErrorCallback(ErrorMessageFormat.STATUS_CODE_MISSING_FROM_RESPONSE.format(), query);
// NOTE(review): partial excerpt of the response dispatcher — the enclosing method signature and
// the `else` branch's closing brace are missing from this view. Do not edit without the full file.
// Visible intent: validate the envelope, then on HTTP 200 check the uncovered-interval overflow
// flag and forward the full envelope to the next processor; otherwise forward only the nested
// RESPONSE payload (presumably the cached body on 304 — TODO confirm against the complete method).
validateJsonResponse(json, query); int statusCode = json.get(DruidJsonResponseContentKeys.STATUS_CODE.getName()).asInt(); if (statusCode == Status.OK.getStatusCode()) { checkOverflow(json, query); next.processResponse(json, query, metadata); } else { next.processResponse(json.get(DruidJsonResponseContentKeys.RESPONSE.getName()), query, metadata);
// NOTE(review): tail of an argument list — the owning call (apparently comparing the request's
// etag against the etag stored in the cache entry) starts before this view. Do not edit in
// isolation; locate the opening call in the full file.
eTagInRequest, mapper.readTree(cacheEntry.getValue()) .get(DruidJsonResponseContentKeys.ETAG.getName()) .asText() );