@Override
public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) {
    ensureNotClosed();
    subscriptions.seek(partition, offsetAndMetadata.offset());
}
private String getOffsetMapString() {
    StringBuilder sb = new StringBuilder();
    sb.append(getName()).append(" current offsets map: ");
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        sb.append("p").append(entry.getKey().partition()).append("-")
            .append(entry.getValue().offset()).append(" ");
    }
    return sb.toString();
}
private TxnOffsetCommitHandler txnOffsetCommitHandler(TransactionalRequestResult result,
                                                      Map<TopicPartition, OffsetAndMetadata> offsets,
                                                      String consumerGroupId) {
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        CommittedOffset committedOffset = new CommittedOffset(offsetAndMetadata.offset(),
                offsetAndMetadata.metadata(), offsetAndMetadata.leaderEpoch());
        pendingTxnOffsetCommits.put(entry.getKey(), committedOffset);
    }
    TxnOffsetCommitRequest.Builder builder = new TxnOffsetCommitRequest.Builder(transactionalId,
            consumerGroupId, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch,
            pendingTxnOffsetCommits);
    return new TxnOffsetCommitHandler(result, builder);
}
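// How the handler above is reached through the public producer API: a minimal sketch,
// assuming an already-initialized transactional producer. The group id and the
// consumedOffsets map are illustrative, not taken from the code above.
static void commitOffsetsInTransaction(KafkaProducer<String, String> producer,
                                       Map<TopicPartition, OffsetAndMetadata> consumedOffsets) {
    producer.beginTransaction();
    // ... send the output records for this batch ...
    producer.sendOffsetsToTransaction(consumedOffsets, "my-group"); // queues a TxnOffsetCommitRequest
    producer.commitTransaction(); // records and offsets become visible atomically
}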
/**
 * Refresh the committed offsets for partitions that are missing a valid fetch position,
 * and seek those partitions to the refreshed offsets.
 *
 * @param timer Timer bounding how long this method can block
 * @return true iff the operation completed within the timeout
 */
public boolean refreshCommittedOffsetsIfNeeded(Timer timer) {
    final Set<TopicPartition> missingFetchPositions = subscriptions.missingFetchPositions();
    final Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(missingFetchPositions, timer);
    if (offsets == null)
        return false;

    for (final Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        final TopicPartition tp = entry.getKey();
        final long offset = entry.getValue().offset();
        log.debug("Setting offset for partition {} to the committed offset {}", tp, offset);
        entry.getValue().leaderEpoch().ifPresent(epoch ->
                this.metadata.updateLastSeenEpochIfNewer(tp, epoch));
        this.subscriptions.seek(tp, offset);
    }
    return true;
}
private void updateFetchPosition(TopicPartition tp) {
    if (subscriptions.isOffsetResetNeeded(tp)) {
        resetOffsetPosition(tp);
    } else if (!committed.containsKey(tp)) {
        // No committed offset for this partition: fall back to the reset policy.
        subscriptions.requestOffsetReset(tp);
        resetOffsetPosition(tp);
    } else {
        subscriptions.seek(tp, committed.get(tp).offset());
    }
}
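// The same decision expressed against the public consumer API: a minimal sketch, with
// the beginning of the partition chosen as an illustrative stand-in for the configured
// reset policy.
static void seekToCommittedOrReset(KafkaConsumer<String, String> consumer, TopicPartition tp) {
    OffsetAndMetadata committed = consumer.committed(tp);
    if (committed != null) {
        consumer.seek(tp, committed.offset()); // resume from the committed position
    } else {
        consumer.seekToBeginning(Collections.singleton(tp)); // no committed offset: reset
    }
}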
@Override
public Long getCommittedOffset(String topicName, int partition) {
    OffsetAndMetadata committed = offsetClient.committed(new TopicPartition(topicName, partition));
    return (committed != null) ? committed.offset() : null;
}
void verifyAllMessagesCommitted(long messageCount) {
    verify(consumerSpy).commitSync(commitCapture.capture());
    final Map<TopicPartition, OffsetAndMetadata> commits = commitCapture.getValue();
    assertThat("Expected commits for only one topic partition", commits.entrySet().size(), is(1));
    OffsetAndMetadata offset = commits.entrySet().iterator().next().getValue();
    assertThat("Expected committed offset to cover all emitted messages", offset.offset(), is(messageCount));
    reset(consumerSpy);
}
public static <K, V> void verifyAllMessagesCommitted(Consumer<K, V> consumerSpy,
                                                     ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitCapture,
                                                     long messageCount) {
    verify(consumerSpy, times(1)).commitSync(commitCapture.capture());
    Map<TopicPartition, OffsetAndMetadata> commits = commitCapture.getValue();
    assertThat("Expected commits for only one topic partition", commits.entrySet().size(), is(1));
    OffsetAndMetadata offset = commits.entrySet().iterator().next().getValue();
    assertThat("Expected committed offset to cover all emitted messages", offset.offset(), is(messageCount));
}
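// Illustrative call site for the static helper above; the spy name and the captor
// construction are assumptions about the surrounding test class, not its actual code.
@SuppressWarnings("unchecked")
static void exampleUsage(Consumer<String, String> consumerSpy) {
    ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitCapture =
        ArgumentCaptor.forClass((Class<Map<TopicPartition, OffsetAndMetadata>>) (Class<?>) Map.class);
    verifyAllMessagesCommitted(consumerSpy, commitCapture, 10L);
}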
@Test
public void testFindNextCommitOffsetWithOneAck() {
    /*
     * The KafkaConsumer commitSync API docs: "The committed offset should be the next
     * message your application will consume, i.e. lastProcessedMessageOffset + 1."
     */
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should be one past the processed message offset",
        nextCommitOffset.offset(), is(initialFetchOffset + 1));
}
@Test
public void testFindNextCommitOffsetWithUnackedOffsetGap() {
    manager.addToEmitMsgs(initialFetchOffset + 1);
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should cover the contiguously acked offsets",
        nextCommitOffset.offset(), is(initialFetchOffset + 1));
}
@Test
public void testAsyncCommitCallbacksInvokedPriorToSyncCommitCompletion() throws Exception {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    final List<OffsetAndMetadata> committedOffsets = Collections.synchronizedList(new ArrayList<OffsetAndMetadata>());
    final OffsetAndMetadata firstOffset = new OffsetAndMetadata(0L);
    final OffsetAndMetadata secondOffset = new OffsetAndMetadata(1L);

    coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), new OffsetCommitCallback() {
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            committedOffsets.add(firstOffset);
        }
    });

    // Do a synchronous commit in the background so that we can send both responses at the same time
    Thread thread = new Thread() {
        @Override
        public void run() {
            coordinator.commitOffsetsSync(singletonMap(t1p, secondOffset), time.timer(10000));
            committedOffsets.add(secondOffset);
        }
    };
    thread.start();

    client.waitForRequests(2, 5000);
    respondToOffsetCommitRequest(singletonMap(t1p, firstOffset.offset()), Errors.NONE);
    respondToOffsetCommitRequest(singletonMap(t1p, secondOffset.offset()), Errors.NONE);
    thread.join();

    assertEquals(Arrays.asList(firstOffset, secondOffset), committedOffsets);
}
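// An application-side sketch of the ordering the test above pins down: asynchronous
// commits during normal processing, then one synchronous commit before shutdown so no
// offsets are lost. The consumer parameter and the elided record processing are
// illustrative assumptions.
static void commitLoop(KafkaConsumer<String, String> consumer, AtomicBoolean running) {
    try {
        while (running.get()) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // ... process records ...
            consumer.commitAsync((offsets, exception) -> {
                if (exception != null)
                    System.err.println("Async commit failed for " + offsets + ": " + exception);
            });
        }
    } finally {
        consumer.commitSync(); // blocks until all outstanding offsets are flushed
        consumer.close();
    }
}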
@Test
public void testFindNextCommitOffsetWithMultipleOutOfOrderAcks() {
    emitAndAckMessage(getMessageId(initialFetchOffset + 1));
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should be one past the processed message offset",
        nextCommitOffset.offset(), is(initialFetchOffset + 2));
}
@Test
public void testFindNextOffsetWithAckedButNotEmittedOffsetGap() {
    /*
     * If topic compaction is enabled in Kafka, some offsets may be deleted.
     * We distinguish this case from regular gaps in the acked offset sequence caused by
     * out-of-order acking by checking that offsets in the gap have been emitted at some
     * point previously. If they haven't, they can't exist in Kafka, since the spout
     * emits tuples in order.
     */
    emitAndAckMessage(getMessageId(initialFetchOffset + 2));
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should cover all the acked offsets, since the offset in the gap hasn't been emitted and doesn't exist",
        nextCommitOffset.offset(), is(initialFetchOffset + 3));
}
@Test
public void testFindNextCommitOffsetWithAckedOffsetGap() {
    emitAndAckMessage(getMessageId(initialFetchOffset + 2));
    manager.addToEmitMsgs(initialFetchOffset + 1);
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should cover the sequential acked offsets",
        nextCommitOffset.offset(), is(initialFetchOffset + 1));
}
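// A minimal sketch of the scan that the findNextCommitOffset tests above pin down. The
// parameter names are illustrative assumptions, not the actual OffsetManager fields:
// advance past an offset when it is acked, or when it was never emitted and therefore
// cannot exist in Kafka (e.g. removed by compaction); stop at a genuine unacked gap.
static long findNextCommitOffsetSketch(NavigableSet<Long> acked, Set<Long> emitted, long committedOffset) {
    long maxAcked = acked.isEmpty() ? Long.MIN_VALUE : acked.last();
    long next = committedOffset;
    while (acked.contains(next) || (next < maxAcked && !emitted.contains(next))) {
        next++;
    }
    return next; // the next offset the consumer will read, i.e. lastProcessed + 1
}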
@Test
public void testSimpleMock() {
    consumer.subscribe(Collections.singleton("test"));
    assertEquals(0, consumer.poll(Duration.ZERO).count());
    consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1)));
    // Mock consumers need to seek manually since they cannot automatically reset offsets
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test", 0), 0L);
    beginningOffsets.put(new TopicPartition("test", 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.seek(new TopicPartition("test", 0), 0);
    ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L,
        TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1");
    ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L,
        TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2");
    consumer.addRecord(rec1);
    consumer.addRecord(rec2);
    ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(1));
    Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
    assertEquals(rec1, iter.next());
    assertEquals(rec2, iter.next());
    assertFalse(iter.hasNext());
    assertEquals(2L, consumer.position(new TopicPartition("test", 0)));
    consumer.commitSync();
    assertEquals(2L, consumer.committed(new TopicPartition("test", 0)).offset());
}
@SuppressWarnings("deprecation") @Test public void testSimpleMockDeprecated() { consumer.subscribe(Collections.singleton("test")); assertEquals(0, consumer.poll(1000).count()); consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1))); // Mock consumers need to seek manually since they cannot automatically reset offsets HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>(); beginningOffsets.put(new TopicPartition("test", 0), 0L); beginningOffsets.put(new TopicPartition("test", 1), 0L); consumer.updateBeginningOffsets(beginningOffsets); consumer.seek(new TopicPartition("test", 0), 0); ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1"); ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2"); consumer.addRecord(rec1); consumer.addRecord(rec2); ConsumerRecords<String, String> recs = consumer.poll(1); Iterator<ConsumerRecord<String, String>> iter = recs.iterator(); assertEquals(rec1, iter.next()); assertEquals(rec2, iter.next()); assertFalse(iter.hasNext()); assertEquals(2L, consumer.position(new TopicPartition("test", 0))); consumer.commitSync(); assertEquals(2L, consumer.committed(new TopicPartition("test", 0)).offset()); }
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;

    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch the committed offset for one partition
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch the committed offsets for two partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());

    consumer.close(Duration.ofMillis(0));
}