private FetchResponse<MemoryRecords> createFetchResponse(Errors error, int sessionId) { return new FetchResponse<>(error, new LinkedHashMap<>(), 25, sessionId); }
/**
 * Deserializes a fetch response from its wire form.
 *
 * @param buffer  The serialized response bytes.
 * @param version The schema version to decode with.
 * @return The parsed response.
 */
public static FetchResponse<MemoryRecords> parse(ByteBuffer buffer, short version) {
    Struct struct = ApiKeys.FETCH.responseSchema(version).read(buffer);
    return parse(struct);
}
int implied = sessionPartitions.size() - response.responseData().size(); if (implied > 0) { return String.format(" with %d response partition(s), %d implied partition(s)", response.responseData().size(), implied); } else { return String.format(" with %d response partition(s)", response.responseData().size()); append(Utils.join(response.responseData().keySet(), ", ")). append(")"); String prefix = ", implied=("; String suffix = ""; for (TopicPartition partition : sessionPartitions.keySet()) { if (!response.responseData().containsKey(partition)) { bld.append(prefix); bld.append(partition);
@Override protected Send toSend(String dest, ResponseHeader responseHeader, short apiVersion) { Struct responseHeaderStruct = responseHeader.toStruct(); Struct responseBodyStruct = toStruct(apiVersion); // write the total size and the response header ByteBuffer buffer = ByteBuffer.allocate(responseHeaderStruct.sizeOf() + 4); buffer.putInt(responseHeaderStruct.sizeOf() + responseBodyStruct.sizeOf()); responseHeaderStruct.writeTo(buffer); buffer.rewind(); Queue<Send> sends = new ArrayDeque<>(); sends.add(new ByteBufferSend(dest, buffer)); addResponseData(responseBodyStruct, throttleTimeMs, dest, sends); return new MultiRecordsSend(dest, sends); }
if (response.error() != Errors.NONE) { log.info("Node {} was unable to process the fetch request with {}: {}.", node, nextMetadata, response.error()); if (response.error() == Errors.FETCH_SESSION_ID_NOT_FOUND) { nextMetadata = FetchMetadata.INITIAL; } else { nextMetadata = FetchMetadata.INITIAL; return false; } else if (response.sessionId() == INVALID_SESSION_ID) { log.debug("Node {} sent a full fetch response{}", node, responseDataToLogString(response)); "fetch session {}{}", node, response.sessionId(), responseDataToLogString(response)); nextMetadata = FetchMetadata.newIncremental(response.sessionId()); return true; nextMetadata = nextMetadata.nextCloseExisting(); return false; } else if (response.sessionId() == INVALID_SESSION_ID) { node, response.sessionId(), responseDataToLogString(response)); nextMetadata = nextMetadata.nextIncremental(); return true;
@Test
public void testFetchResponseV4() {
    // Round-trip a multi-partition response through the v4 schema and verify the
    // partition data is preserved.
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponse.AbortedTransaction> abortedTransactions = asList(
            new FetchResponse.AbortedTransaction(10, 100),
            new FetchResponse.AbortedTransaction(15, 50));

    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions =
            new LinkedHashMap<>();
    partitions.put(new TopicPartition("bar", 0),
            new FetchResponse.PartitionData<>(Errors.NONE, 100000,
                    FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET,
                    abortedTransactions, records));
    partitions.put(new TopicPartition("bar", 1),
            new FetchResponse.PartitionData<>(Errors.NONE, 900000, 5,
                    FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    partitions.put(new TopicPartition("foo", 0),
            new FetchResponse.PartitionData<>(Errors.NONE, 70000, 6,
                    FetchResponse.INVALID_LOG_START_OFFSET, Collections.emptyList(), records));

    FetchResponse<MemoryRecords> response =
            new FetchResponse<>(Errors.NONE, partitions, 10, INVALID_SESSION_ID);
    FetchResponse deserialized = FetchResponse.parse(toBuffer(response.toStruct((short) 4)), (short) 4);
    assertEquals(partitions, deserialized.responseData());
}
@Test
public void fetchResponseVersionTest() {
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions =
            new LinkedHashMap<>();
    partitions.put(new TopicPartition("test", 0),
            new FetchResponse.PartitionData<>(Errors.NONE, 1000000,
                    FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));

    FetchResponse<MemoryRecords> v0Response =
            new FetchResponse<>(Errors.NONE, partitions, 0, INVALID_SESSION_ID);
    FetchResponse<MemoryRecords> v1Response =
            new FetchResponse<>(Errors.NONE, partitions, 10, INVALID_SESSION_ID);

    // Throttle times are reported exactly as supplied at construction.
    assertEquals("Throttle time must be zero", 0, v0Response.throttleTimeMs());
    assertEquals("Throttle time must be 10", 10, v1Response.throttleTimeMs());
    // Each response serializes against the schema of the requested version.
    assertEquals("Should use schema version 0", ApiKeys.FETCH.responseSchema((short) 0),
            v0Response.toStruct((short) 0).schema());
    assertEquals("Should use schema version 1", ApiKeys.FETCH.responseSchema((short) 1),
            v1Response.toStruct((short) 1).schema());
    assertEquals("Response data does not match", partitions, v0Response.responseData());
    assertEquals("Response data does not match", partitions, v1Response.responseData());
}
private void verifyFetchResponseFullWrite(short apiVersion, FetchResponse fetchResponse) throws Exception { int correlationId = 15; Send send = fetchResponse.toSend("1", new ResponseHeader(correlationId), apiVersion); ByteBufferChannel channel = new ByteBufferChannel(send.size()); send.writeTo(channel); channel.close(); ByteBuffer buf = channel.buffer(); // read the size int size = buf.getInt(); assertTrue(size > 0); // read the header ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer()); assertEquals(correlationId, responseHeader.correlationId()); // read the body Struct responseBody = ApiKeys.FETCH.responseSchema(apiVersion).read(buf); assertEquals(fetchResponse.toStruct(apiVersion), responseBody); assertEquals(size, responseHeader.sizeOf() + responseBody.sizeOf()); }
/**
 * Serializes this response by delegating to the static serializer with this
 * instance's own throttle time, error, partition data, and session id.
 */
@Override
public Struct toStruct(short version) {
    return toStruct(version, throttleTimeMs, error, responseData.entrySet().iterator(), sessionId);
}
private static void addTopicData(String dest, Queue<Send> sends, Struct topicData) { String topic = topicData.get(TOPIC_NAME); Object[] allPartitionData = topicData.getArray(PARTITIONS_KEY_NAME); // include the topic header and the count for the number of partitions ByteBuffer buffer = ByteBuffer.allocate(STRING.sizeOf(topic) + 4); STRING.write(buffer, topic); buffer.putInt(allPartitionData.length); buffer.rewind(); sends.add(new ByteBufferSend(dest, buffer)); for (Object partitionData : allPartitionData) addPartitionData(dest, sends, (Struct) partitionData); }
/**
 * Appends the sends for the response body. The leading fixed-size fields vary by schema
 * version, detected by which fields are present in the struct; the topic data follows.
 */
private static void addResponseData(Struct struct, int throttleTimeMs, String dest, Queue<Send> sends) {
    Object[] allTopicData = struct.getArray(RESPONSES_KEY_NAME);

    if (struct.hasField(ERROR_CODE)) {
        // Schemas with a top-level error code:
        // throttle time (4) + error code (2) + session id (4) + topic count (4) = 14 bytes.
        ByteBuffer buffer = ByteBuffer.allocate(14);
        buffer.putInt(throttleTimeMs);
        buffer.putShort(struct.get(ERROR_CODE));
        buffer.putInt(struct.get(SESSION_ID));
        buffer.putInt(allTopicData.length);
        buffer.rewind();
        sends.add(new ByteBufferSend(dest, buffer));
    } else if (struct.hasField(THROTTLE_TIME_MS)) {
        // Schemas with throttle time but no top-level error:
        // throttle time (4) + topic count (4) = 8 bytes.
        ByteBuffer buffer = ByteBuffer.allocate(8);
        buffer.putInt(throttleTimeMs);
        buffer.putInt(allTopicData.length);
        buffer.rewind();
        sends.add(new ByteBufferSend(dest, buffer));
    } else {
        // Oldest schemas: only the 4-byte topic count.
        ByteBuffer buffer = ByteBuffer.allocate(4);
        buffer.putInt(allTopicData.length);
        buffer.rewind();
        sends.add(new ByteBufferSend(dest, buffer));
    }

    for (Object topicData : allTopicData)
        addTopicData(dest, sends, (Struct) topicData);
}
client.poll(1, time.milliseconds()); FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs); buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId())); selector.completeReceive(new NetworkReceive(node.idString(), buffer)); client.poll(1, time.milliseconds());
/** * Convenience method to find the size of a response. * * @param version The version of the response to use. * @param partIterator The partition iterator. * @return The response size in bytes. */ public static <T extends BaseRecords> int sizeOf(short version, Iterator<Map.Entry<TopicPartition, PartitionData<T>>> partIterator) { // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can // use arbitrary values here without affecting the result. return 4 + toStruct(version, 0, Errors.NONE, partIterator, INVALID_SESSION_ID).sizeOf(); }
@Override public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { // The error is indicated in two ways: by setting the same error code in all partitions, and by // setting the top-level error code. The form where we set the same error code in all partitions // is needed in order to maintain backwards compatibility with older versions of the protocol // in which there was no top-level error code. Note that for incremental fetch responses, there // may not be any partitions at all in the response. For this reason, the top-level error code // is essential for them. Errors error = Errors.forException(e); LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>(); for (Map.Entry<TopicPartition, PartitionData> entry : fetchData.entrySet()) { FetchResponse.PartitionData<MemoryRecords> partitionResponse = new FetchResponse.PartitionData<>(error, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY); responseData.put(entry.getKey(), partitionResponse); } return new FetchResponse<>(error, responseData, throttleTimeMs, metadata.sessionId()); }
/**
 * Verify that the partitions in an incremental fetch response are contained in the session.
 *
 * @param response The response.
 * @return null if the incremental fetch response partitions are valid; a human-readable
 *         description of the problem otherwise.
 */
private String verifyIncrementalFetchResponsePartitions(FetchResponse<?> response) {
    // Partitions that appear in the response but were never part of the session.
    Set<TopicPartition> extra =
        findMissing(response.responseData().keySet(), sessionPartitions.keySet());
    if (!extra.isEmpty()) {
        StringBuilder bld = new StringBuilder();
        bld.append("extra=(").append(Utils.join(extra, ", ")).append("), ");
        bld.append("response=(").append(
            Utils.join(response.responseData().keySet(), ", ")).append("), ");
        return bld.toString();
    }
    return null;
}
return new ProduceResponse(struct); case FETCH: return FetchResponse.parse(struct); case LIST_OFFSETS: return new ListOffsetResponse(struct);
/**
 * Builds a full fetch response holding a single partition with a log start offset of zero
 * and no aborted transactions.
 */
private FetchResponse<MemoryRecords> fullFetchResponse(TopicPartition tp, MemoryRecords records,
                                                       Errors error, long hw, long lastStableOffset,
                                                       int throttleTime) {
    FetchResponse.PartitionData<MemoryRecords> partitionData =
            new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, null, records);
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data =
            new LinkedHashMap<>();
    data.put(tp, partitionData);
    return new FetchResponse<>(Errors.NONE, data, throttleTime, INVALID_SESSION_ID);
}
/**
 * Verify that a full fetch response contains all the partitions in the fetch session.
 *
 * @param response The response.
 * @return null if the full fetch response partitions are valid; a human-readable
 *         description of the problem otherwise.
 */
private String verifyFullFetchResponsePartitions(FetchResponse<?> response) {
    StringBuilder bld = new StringBuilder();
    // Session partitions the response failed to include. (The original swapped the
    // findMissing arguments for omitted/extra, mislabeling both sets; compare the
    // incremental verifier, where extra = response keys not in the session.)
    Set<TopicPartition> omitted =
        findMissing(sessionPartitions.keySet(), response.responseData().keySet());
    // Response partitions that were never part of the session.
    Set<TopicPartition> extra =
        findMissing(response.responseData().keySet(), sessionPartitions.keySet());
    if (!omitted.isEmpty()) {
        // Close each set's parenthesis before the separator (was `", "`).
        bld.append("omitted=(").append(Utils.join(omitted, ", ")).append("), ");
    }
    if (!extra.isEmpty()) {
        bld.append("extra=(").append(Utils.join(extra, ", ")).append("), ");
    }
    if ((!omitted.isEmpty()) || (!extra.isEmpty())) {
        bld.append("response=(").append(Utils.join(response.responseData().keySet(), ", ")).append(")");
        return bld.toString();
    }
    return null;
}
/**
 * Builds a full fetch response for {@code tp0} carrying the given aborted transactions and
 * a log start offset of zero.
 */
private FetchResponse<MemoryRecords> fullFetchResponseWithAbortedTransactions(
        MemoryRecords records,
        List<FetchResponse.AbortedTransaction> abortedTransactions,
        Errors error,
        long lastStableOffset,
        long hw,
        int throttleTime) {
    FetchResponse.PartitionData<MemoryRecords> partitionData =
            new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, abortedTransactions, records);
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data =
            new LinkedHashMap<>();
    data.put(tp0, partitionData);
    return new FetchResponse<>(Errors.NONE, data, throttleTime, INVALID_SESSION_ID);
}
/**
 * Handles a successful fetch response: validates it against the node's fetch session,
 * then queues each partition's data as a CompletedFetch for later consumption.
 */
@Override
public void onSuccess(ClientResponse resp) {
    synchronized (Fetcher.this) {
        @SuppressWarnings("unchecked")
        FetchResponse<Records> response = (FetchResponse<Records>) resp.responseBody();
        // Look up the session handler for the node this fetch was sent to; without one
        // the response cannot be validated, so it is dropped.
        FetchSessionHandler handler = sessionHandler(fetchTarget.id());
        if (handler == null) {
            log.error("Unable to find FetchSessionHandler for node {}. Ignoring fetch response.",
                fetchTarget.id());
            return;
        }
        // The handler decides whether the response is usable; if not, discard it.
        if (!handler.handleResponse(response)) {
            return;
        }

        Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet());
        // One aggregator shared across all partitions in this response for metrics recording.
        FetchResponseMetricAggregator metricAggregator =
            new FetchResponseMetricAggregator(sensors, partitions);

        for (Map.Entry<TopicPartition, FetchResponse.PartitionData<Records>> entry :
                response.responseData().entrySet()) {
            TopicPartition partition = entry.getKey();
            // The offset this partition was fetched at, taken from the session's request state.
            long fetchOffset = data.sessionPartitions().get(partition).fetchOffset;
            FetchResponse.PartitionData<Records> fetchData = entry.getValue();

            log.debug("Fetch {} at offset {} for partition {} returned fetch data {}",
                isolationLevel, fetchOffset, partition, fetchData);
            completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData,
                metricAggregator, resp.requestHeader().apiVersion()));
        }

        sensors.fetchLatency.record(resp.requestLatencyMs());
    }
}