// Synchronously commits offsets, bounded by {@code timeout}, by delegating to the wrapped consumer.
// Added @Override for consistency with the sibling commitSync overloads (see the no-arg and
// Map-based variants, which already carry it) — this overload is declared on Kafka's Consumer
// interface, which the enclosing class evidently implements.
@Override
public void commitSync(Duration timeout) {
    delegate.commitSync(timeout);
}
// Rebalance callback: before the broker reassigns partitions away from this consumer,
// synchronously commit current offsets so the next owner does not reprocess already-handled
// records. NOTE(review): the no-arg commitSync() commits offsets for ALL partitions this
// consumer has consumed, not only the revoked ones in 'collection' — confirm that is intended.
@Override public void onPartitionsRevoked(Collection<TopicPartition> collection) { _metricConsumer.commitSync(); }
// Synchronously commits the given per-partition offsets by delegating to the wrapped consumer.
@Override public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) { delegate.commitSync(offsets); }
// Synchronously commits the given per-partition offsets, bounded by {@code timeout}, by
// delegating to the wrapped consumer. Added @Override for consistency with the sibling
// commitSync overloads that already carry it — this overload is declared on Kafka's
// Consumer interface, which the enclosing class evidently implements.
@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, Duration timeout) {
    delegate.commitSync(offsets, timeout);
}
// Synchronously commits offsets for all consumed partitions by delegating to the wrapped consumer.
@Override public void commitSync() { delegate.commitSync(); }
/**
 * Asserts that exactly one synchronous commit happened on the consumer spy, that the commit
 * covered a single topic partition, and that the committed offset equals {@code messageCount}
 * (i.e. every emitted message was covered by the commit).
 *
 * @param consumerSpy   Mockito spy of the consumer whose commits are verified
 * @param commitCapture captor that receives the offsets map passed to {@code commitSync}
 * @param messageCount  number of emitted messages the committed offset must equal
 */
public static <K, V> void verifyAllMessagesCommitted(Consumer<K, V> consumerSpy,
        ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitCapture, long messageCount) {
    verify(consumerSpy, times(1)).commitSync(commitCapture.capture());
    Map<TopicPartition, OffsetAndMetadata> commits = commitCapture.getValue();
    // Map.size() is the idiomatic (and cheaper) equivalent of entrySet().size().
    assertThat("Expected commits for only one topic partition", commits.size(), is(1));
    // values() avoids materializing an entry just to unwrap the single value.
    OffsetAndMetadata offset = commits.values().iterator().next();
    assertThat("Expected committed offset to cover all emitted messages", offset.offset(), is(messageCount));
}
// Fragment (enclosing method not visible): synchronously commit the prepared offsets, then log
// success at debug level. The log statement only runs if commitSync did not throw.
consumer.commitSync(nextCommitOffsets); LOG.debug("Offsets successfully committed to Kafka [{}]", nextCommitOffsets);
/**
 * Polls the broker for records, restricted to the currently pollable partitions.
 *
 * Flow: (1) seek back to the earliest retriable offset on each retriable partition,
 * (2) pause every assigned partition that is NOT pollable so poll() only fetches from
 * pollable ones, (3) poll, (4) ack any retriable offsets that log compaction has removed,
 * (5) under AT_MOST_ONCE, commit the fetched offsets immediately — committing before
 * processing is what makes delivery at-most-once. Paused partitions are always resumed
 * in the finally block, even if poll/commit throws.
 *
 * @param pollablePartitionsInfo which partitions may be polled and their earliest retriable offsets
 * @return the records fetched by this poll (possibly empty)
 */
private ConsumerRecords<K, V> pollKafkaBroker(PollablePartitionsInfo pollablePartitionsInfo) {
    doSeekRetriableTopicPartitions(pollablePartitionsInfo.pollableEarliestRetriableOffsets);
    // Pause = assignment minus pollable set; removeIf keeps only the non-pollable partitions.
    Set<TopicPartition> pausedPartitions = new HashSet<>(consumer.assignment());
    pausedPartitions.removeIf(pollablePartitionsInfo.pollablePartitions::contains);
    try {
        consumer.pause(pausedPartitions);
        final ConsumerRecords<K, V> consumerRecords = consumer.poll(kafkaSpoutConfig.getPollTimeoutMs());
        ackRetriableOffsetsIfCompactedAway(pollablePartitionsInfo.pollableEarliestRetriableOffsets, consumerRecords);
        final int numPolledRecords = consumerRecords.count();
        LOG.debug("Polled [{}] records from Kafka", numPolledRecords);
        if (kafkaSpoutConfig.getProcessingGuarantee() == KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE) {
            //Commit polled records immediately to ensure delivery is at-most-once.
            // NOTE(review): offsets are built from the full assignment() here, which includes the
            // paused partitions — presumably createFetchedOffsetsMetadata handles that; confirm.
            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = createFetchedOffsetsMetadata(consumer.assignment());
            consumer.commitSync(offsetsToCommit);
            LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
        }
        return consumerRecords;
    } finally {
        // Always undo the pause so later polls see the full assignment again.
        consumer.resume(pausedPartitions);
    }
}
// Fragment (enclosing method not visible): commit the accumulated uncommitted offsets, clear
// the tracking state, and report success. resetInternalState() only runs if commitSync succeeds.
kafkaConsumer.commitSync(uncommittedOffsetsMap); resetInternalState(); return true;
// Fragments (enclosing methods not visible): four identical commit-and-reset snippets from
// separate call sites — commit the offsets map, clear tracking state, report success.
// resetInternalState() only runs if commitSync succeeds.
kafkaConsumer.commitSync(offsetsMap); resetInternalState(); return true;
kafkaConsumer.commitSync(offsetsMap); resetInternalState(); return true;
kafkaConsumer.commitSync(offsetsMap); resetInternalState(); return true;
kafkaConsumer.commitSync(offsetsMap); resetInternalState(); return true;
/**
 * Verifies that after the container processes a batch, the mock consumer saw subscribe, one
 * poll, and three per-record synchronous commits in exact order, and that the listener config
 * captured the expected delivered contents and the remaining (unprocessed) records.
 *
 * NOTE(review): method name typo — "remaing" should be "remaining"; left unchanged here
 * because renaming alters the test's public identifier.
 */
@SuppressWarnings("unchecked")
@Test
public void remaingRecordsReceived() throws Exception {
    // All latches must trip within the timeout: delivery, commit, poll, and error paths.
    assertThat(this.config.deliveryLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.commitLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.pollLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.errorLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(this.consumer);
    inOrder.verify(this.consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
    // One commit per record, each covering exactly one partition/offset, in processing order.
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(1L)));
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(2L)));
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 1), new OffsetAndMetadata(1L)));
    assertThat(this.config.count).isEqualTo(4);
    assertThat(this.config.contents).containsExactly("foo", "bar", "baz", "qux");
    // "qux" appears in both delivered and remaining — presumably it failed mid-batch and was
    // left to be redelivered; confirm against the listener configuration.
    assertThat(this.config.remaining).containsExactly("qux", "fiz", "buz");
}
// Fragment (enclosing test method not visible): strict in-order verification of two
// poll cycles — first cycle commits three single-entry offset maps, then the consumer
// seeks back on partitions 1 and 2 (redelivery), and the second cycle commits three more.
inOrder.verify(this.consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(1L)));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(2L)));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 1), new OffsetAndMetadata(1L)));
// Seeks rewind the uncommitted records so the next poll redelivers them.
inOrder.verify(this.consumer).seek(new TopicPartition("foo", 1), 1L);
inOrder.verify(this.consumer).seek(new TopicPartition("foo", 2), 0L);
inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 1), new OffsetAndMetadata(2L)));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 2), new OffsetAndMetadata(1L)));
inOrder.verify(this.consumer).commitSync(
        Collections.singletonMap(new TopicPartition("foo", 2), new OffsetAndMetadata(2L)));
inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
/**
 * Verifies that when the listener's error handler stops the container, the consumer performed
 * exactly: subscribe, one poll, three per-record commits, then wakeup/unsubscribe/close — and
 * nothing else (verifyNoMoreInteractions) — and that the container is no longer running.
 */
@SuppressWarnings("unchecked")
@Test
public void stopContainerAfterException() throws Exception {
    assertThat(this.config.deliveryLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.commitLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.pollLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.errorLatch.await(10, TimeUnit.SECONDS)).isTrue();
    // closeLatch proves the consumer was actually closed as part of the stop.
    assertThat(this.config.closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    MessageListenerContainer container = this.registry.getListenerContainer(CONTAINER_ID);
    assertThat(container.isRunning()).isFalse();
    InOrder inOrder = inOrder(this.consumer);
    inOrder.verify(this.consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(1L)));
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(2L)));
    inOrder.verify(this.consumer).commitSync(
            Collections.singletonMap(new TopicPartition("foo", 1), new OffsetAndMetadata(1L)));
    // Shutdown sequence: wakeup interrupts the blocked poll, then unsubscribe and close.
    inOrder.verify(this.consumer).wakeup();
    inOrder.verify(this.consumer).unsubscribe();
    inOrder.verify(this.consumer).close();
    inOrder.verifyNoMoreInteractions();
    assertThat(this.config.count).isEqualTo(4);
    assertThat(this.config.contents.toArray()).isEqualTo(new String[] { "foo", "bar", "baz", "qux" });
}
/**
 * Verifies the discard-and-seek recovery path: the first poll cycle ends in one batched commit
 * (two partitions in a single LinkedHashMap — entry order matters to the verification), the
 * remaining records are discarded via seeks back to their offsets, and the second cycle
 * redelivers and commits them in another batched commit.
 */
@SuppressWarnings("unchecked")
@Test
public void discardRemainingRecordsFromPollAndSeek() throws Exception {
    assertThat(this.config.deliveryLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.commitLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(this.config.pollLatch.await(10, TimeUnit.SECONDS)).isTrue();
    this.registry.stop();
    assertThat(this.config.closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(this.consumer);
    inOrder.verify(this.consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
    // LinkedHashMap preserves insertion order so the expected map mirrors the commit order.
    Map<TopicPartition, OffsetAndMetadata> offsets = new LinkedHashMap<>();
    offsets.put(new TopicPartition("foo", 0), new OffsetAndMetadata(2L));
    offsets.put(new TopicPartition("foo", 1), new OffsetAndMetadata(1L));
    inOrder.verify(this.consumer).commitSync(offsets);
    // Seeks rewind the discarded records so the next poll fetches them again.
    inOrder.verify(this.consumer).seek(new TopicPartition("foo", 1), 1L);
    inOrder.verify(this.consumer).seek(new TopicPartition("foo", 2), 0L);
    inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
    offsets = new LinkedHashMap<>();
    offsets.put(new TopicPartition("foo", 1), new OffsetAndMetadata(2L));
    offsets.put(new TopicPartition("foo", 2), new OffsetAndMetadata(2L));
    inOrder.verify(this.consumer).commitSync(offsets);
    inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
    // 7 deliveries total: 4 from the first cycle plus the 3 redelivered ("qux" seen twice).
    assertThat(this.config.count).isEqualTo(7);
    assertThat(this.config.contents.toArray()).isEqualTo(new String[] { "foo", "bar", "baz", "qux",
            "qux", "fiz", "buz" });
}
// Fragment (enclosing stubbing method not visible): tail of a willAnswer(...) stub — each
// commitSync(Map) call on the mock counts down the commit latch, then the consumer is returned.
this.commitLatch.countDown(); return null; }).given(consumer).commitSync(any(Map.class)); return consumer;
/**
 * Batch listener container factory whose ConsumerFactory hands out Mockito spies of the real
 * consumers, so tests can latch on {@code commitSync} calls while the real commit still runs.
 *
 * The outer willAnswer intercepts createConsumer(group, clientIdPrefix, clientIdSuffix) to wrap
 * each real consumer in a spy; the inner willAnswer wraps the spy's commitSync(Map) so the real
 * method executes first and spyLatch counts down afterwards (in finally — even on failure).
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Bean
public KafkaListenerContainerFactory<?> batchSpyFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    ConsumerFactory spiedCf = mock(ConsumerFactory.class);
    willAnswer(i -> {
        // Spy on a REAL consumer built from the same three createConsumer arguments.
        Consumer<Integer, CharSequence> spy =
                spy(consumerFactory().createConsumer(i.getArgument(0), i.getArgument(1), i.getArgument(2)));
        willAnswer(invocation -> {
            try {
                return invocation.callRealMethod();
            }
            finally {
                // Count down after the real commit, pass or fail.
                spyLatch.countDown();
            }
        }).given(spy).commitSync(anyMap());
        return spy;
    }).given(spiedCf).createConsumer(anyString(), anyString(), anyString());
    factory.setConsumerFactory(spiedCf);
    factory.setBatchListener(true);
    factory.setRecordFilterStrategy(recordFilter());
    // always send to the same partition so the replies are in order for the test
    factory.setReplyTemplate(partitionZeroReplyingTemplate());
    return factory;
}
// Fragment (enclosing test method not visible): exactly one commit before the container stops,
// and exactly one more (two total) as part of stopping — presumably the stop-time commit of
// in-flight offsets; confirm against the container's commit-on-stop behavior.
verify(consumer, times(1)).commitSync(anyMap()); container.stop(); verify(consumer, times(2)).commitSync(anyMap());