public ReceiverOptions<Integer, String> createReceiverOptions(Map<String, Object> propsOverride, String groupId) {
    Map<String, Object> props = consumerProps(groupId);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "2");
    if (propsOverride != null)
        props.putAll(propsOverride);
    // ReceiverOptions is immutable: each mutator returns a new instance, so the
    // calls must be chained and the result reassigned, not invoked and discarded.
    receiverOptions = ReceiverOptions.<Integer, String>create(props)
            .commitInterval(Duration.ofMillis(50))
            .maxCommitAttempts(1);
    return receiverOptions;
}
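// consumerProps(groupId) is a helper not shown in this excerpt. A minimal sketch of
// what it plausibly returns (hypothetical values, standard ConsumerConfig keys only;
// the real helper may set more properties):
private Map<String, Object> consumerProps(String groupId) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed test broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return props;
}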
/**
 * Tests that the inbound flux is terminated with an error if a transient commit error
 * persists beyond the configured maximum number of commit attempts.
 */
@Test
public void autoAckCommitTransientErrorMaxRetries() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 5);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    int count = 100;
    sendMessages(topic, 0, count);
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Semaphore errorSemaphore = new Semaphore(0);
    receiver.receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(r -> receivedMessages.add(r))
            .doOnError(e -> errorSemaphore.release())
            .subscribe();
    assertTrue("Flux did not fail", errorSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    assertTrue("Commit failure did not fail flux", receivedMessages.size() < count);
}
/**
 * Tests that the inbound flux is terminated with an error if a commit fails with a
 * non-retriable error.
 */
@Test
public void autoAckCommitFatalError() throws Exception {
    consumer.addCommitException(new InvalidOffsetException("invalid offset"), 1);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    int count = 100;
    sendMessages(topic, 0, count);
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Semaphore errorSemaphore = new Semaphore(0);
    receiver.receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(r -> receivedMessages.add(r))
            .doOnError(e -> errorSemaphore.release())
            .subscribe();
    assertTrue("Flux did not fail", errorSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    assertTrue("Commit failure did not fail flux", receivedMessages.size() < count);
}
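// addCommitException(...) is a hook on the test's mock consumer, not a Kafka consumer API.
// A hypothetical sketch of the contract these tests rely on, assuming the mock fails the
// next `count` commit attempts with the given exception and lets later commits succeed
// (TopicPartition, OffsetAndMetadata and OffsetCommitCallback are the real Kafka types):
private Exception commitException;
private int remainingCommitFailures;
private final Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>(); // hypothetical bookkeeping

public void addCommitException(Exception exception, int count) {
    this.commitException = exception;
    this.remainingCommitFailures = count;
}

public synchronized void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
    if (remainingCommitFailures > 0) {
        remainingCommitFailures--;
        callback.onComplete(offsets, commitException); // report the injected failure
    } else {
        committedOffsets.putAll(offsets);
        callback.onComplete(offsets, null);            // commit succeeds
    }
}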
/**
 * Tests that auto-commit failures are propagated to the inbound flux after the configured
 * number of commit attempts, and that uncommitted messages are redelivered.
 */
@Test
public void autoCommitFailurePropagationAfterRetries() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .maxCommitAttempts(2);
    testAutoCommitFailureScenarios(true, count, 2, 0, Integer.MAX_VALUE);
    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receive();
    sendReceiveWithRedelivery(flux, count, count, 2, 5);
}
/**
 * Tests that a failed manual commit can be retried with {@code retry()} on the commit
 * Mono, even when {@code maxCommitAttempts} is set to one.
 */
@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(1)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
/**
 * Tests that the manual commit Mono fails if commits do not succeed after a transient
 * error within the configured number of attempts.
 */
@Test
public void manualCommitFailure() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 10);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(2)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveVerifyError(RetriableCommitFailedException.class, record ->
            record.receiverOffset().commit().retry(5).then(Mono.just(record)));
}
/**
 * Tests that commits are retried if the failure is transient, and that the manual commit
 * Mono does not fail if the commit succeeds within the configured number of attempts.
 */
@Test
public void manualCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(10)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
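// receiveAndVerify and receiveVerifyError are test helpers not shown in this excerpt.
// A minimal sketch of receiveAndVerify under that assumption: receive records, apply
// the per-record action (here, a manual commit) in order, and verify the first `count`:
private void receiveAndVerify(int count,
        Function<ReceiverRecord<Integer, String>, Mono<ReceiverRecord<Integer, String>>> onReceive) {
    Flux<ReceiverRecord<Integer, String>> flux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                .receive()
                .concatMap(onReceive); // concatMap preserves record order while applying the action
    verifyMessages(flux.take(count), count);
}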
/**
 * Tests that {@link KafkaReceiver#receiveAtmostOnce()} commit failures terminate the
 * inbound flux with an error.
 */
@Test
public void atmostOnceCommitFailure() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 10);
    int count = 10;
    receiverOptions = receiverOptions
            .maxCommitAttempts(2)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                .receiveAtmostOnce();
    StepVerifier.create(inboundFlux)
            .expectError(RetriableCommitFailedException.class)
            .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
}
@Test
public void brokerRestart() throws Exception {
    int sendBatchSize = 10;
    receiverOptions = receiverOptions.maxCommitAttempts(1000);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
            .receive()
            .doOnError(e -> log.error("KafkaFlux exception", e));
    CountDownLatch receiveLatch = new CountDownLatch(sendBatchSize * 2);
    subscribe(kafkaFlux, receiveLatch);
    sendMessagesSync(0, sendBatchSize);
    shutdownKafkaBroker();
    TestUtils.sleep(5000);
    restartKafkaBroker();
    sendMessagesSync(sendBatchSize, sendBatchSize);
    waitForMessages(receiveLatch);
    checkConsumedMessages();
}
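// subscribe(kafkaFlux, receiveLatch) is another elided helper. A plausible shape,
// assuming it records each message and counts down the latch so waitForMessages can
// block until all expected records have arrived:
private void subscribe(Flux<? extends ConsumerRecord<Integer, String>> flux, CountDownLatch latch) {
    flux.subscribe(record -> {
        receivedMessages.add(record);
        latch.countDown();
    });
}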
/**
 * Tests that transient commit failures are retried with {@link KafkaReceiver#receiveAtmostOnce()}.
 */
@Test
public void atmostOnceCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    receiverOptions = receiverOptions
            .maxCommitAttempts(10)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                .receiveAtmostOnce();
    verifyMessages(inboundFlux.take(10), 10);
    verifyCommits(groupId, topic, 10);
}
.maxCommitAttempts(100)
.addAssignListener(this::seekToBeginning)
.addAssignListener(this::onPartitionsAssigned)
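// A hypothetical implementation of the seekToBeginning listener referenced above,
// assuming reactor-kafka's ReceiverPartition API: assign listeners receive the set of
// assigned partitions, and each ReceiverPartition exposes seekToBeginning().
private void seekToBeginning(Collection<ReceiverPartition> partitions) {
    partitions.forEach(ReceiverPartition::seekToBeginning);
}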
ImmutableReceiverOptions(ReceiverOptions<K, V> options) {
    this(
        options.consumerProperties(),
        options.assignListeners(),
        options.revokeListeners(),
        options.keyDeserializer(),
        options.valueDeserializer(),
        options.pollTimeout(),
        options.closeTimeout(),
        options.commitInterval(),
        options.commitBatchSize(),
        options.atmostOnceCommitAheadSize(),
        options.maxCommitAttempts(),
        options.subscriptionTopics(),
        options.assignment(),
        options.subscriptionPattern(),
        options.schedulerSupplier()
    );
}
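// The copy constructor above is what makes copy-on-write mutators cheap: each fluent
// setter can snapshot the current options and override a single field. Caller-side
// consequence (illustrative sketch only): mutators return a new instance, so the
// result must be reassigned, exactly as the tests above do.
ReceiverOptions<Integer, String> base = ReceiverOptions.create(consumerProps(groupId));
ReceiverOptions<Integer, String> tuned = base.maxCommitAttempts(10); // `base` itself is unchanged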
receiverOptions = receiverOptions.commitBatchSize(1)
        .commitInterval(Duration.ofMillis(1000))
        .maxCommitAttempts(maxAttempts)
        .closeTimeout(Duration.ofMillis(1000))
        .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
/**
 * Tests that retriable commit exceptions are retried with {@link KafkaReceiver#receiveAutoAck()}.
 */
@Test
public void autoAckCommitTransientError() {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 3);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                .receiveAutoAck()
                .concatMap(r -> r);
    verifyMessages(inboundFlux.take(11), 11);
    receivedMessages.removeIf(r -> r.offset() >= 10); // Last record (offset 10) should not be committed
    verifyCommits(groupId, topic, 10);
}