private void testManualCommitRetry(boolean retriableException) throws Exception {
    int count = 1;
    int failureCount = 2;
    Semaphore receiveSemaphore = new Semaphore(1 - count);
    Semaphore commitSuccessSemaphore = new Semaphore(0);
    Semaphore commitFailureSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    TestableReceiver testableReceiver = new TestableReceiver(receiver);
    Flux<? extends ConsumerRecord<Integer, String>> flux = testableReceiver
            .receiveWithManualCommitFailures(retriableException, failureCount, receiveSemaphore,
                    commitSuccessSemaphore, commitFailureSemaphore);

    subscribe(flux, new CountDownLatch(count));
    sendMessages(1, count);
    assertTrue("Did not receive messages",
            receiveSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertTrue("Commit did not succeed after retry",
            commitSuccessSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertEquals(failureCount, commitFailureSemaphore.availablePermits());
}
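// For context, a minimal sketch of how this parameterized helper might be invoked
// for the retriable and non-retriable cases. The test names below are illustrative
// assumptions, not taken from the source.
@Test
public void manualCommitRetriableException() throws Exception {
    testManualCommitRetry(true);  // commits fail with a retriable exception, then succeed
}

@Test
public void manualCommitNonRetriableException() throws Exception {
    testManualCommitRetry(false); // commits fail with a non-retriable exception
}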
@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(10);
    receiverOptions.commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most commitBatchSize records may be redelivered
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}
@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(Integer.MAX_VALUE);
    receiverOptions.commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Wait past the commit interval so all acknowledged offsets are committed before restart
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}
@Test
public void atleastOnceCommitRecord() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(1);
    receiverOptions.commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most one record may be redelivered
    restartAndCheck(receiver, 100, 100, 1);
}
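// The three at-least-once tests above exercise the same trade-off from different
// angles: commits are triggered per record, per batch of acknowledgements, or per
// interval, and the commit trigger bounds how many acknowledged-but-uncommitted
// records can be redelivered after a restart. A side-by-side summary sketch, using
// the values and chaining style from the tests above:
receiverOptions.commitBatchSize(1)                 // commit per record: at most 1 redelivery
               .commitInterval(Duration.ofMillis(60000));
receiverOptions.commitBatchSize(10)                // commit per batch: at most commitBatchSize redeliveries
               .commitInterval(Duration.ofMillis(60000));
receiverOptions.commitBatchSize(Integer.MAX_VALUE) // commit on interval only: none once the interval elapses
               .commitInterval(Duration.ofMillis(1000));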
/**
 * Tests manual commits for {@link KafkaReceiver#receive()} with synchronous commits
 * after message processing.
 */
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> {
        StepVerifier.create(record.receiverOffset().commit())
                .expectComplete()
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
        return Mono.just(record);
    });
    verifyCommits(groupId, topic, 10);
}
/**
 * Tests that commits are disabled completely if periodic commits by batch size
 * and periodic commits by interval are both disabled.
 */
@Test
public void autoCommitDisable() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ZERO)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20);
    receivedMessages.clear();
    // A new consumer in the same group starts from the beginning because nothing was committed
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(20);
}
/**
 * Tests that the inbound flux is terminated with an error if a transient commit error
 * persists beyond the maximum configured number of attempts.
 */
@Test
public void autoAckCommitTransientErrorMaxRetries() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 5);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    int count = 100;
    sendMessages(topic, 0, count);
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Semaphore errorSemaphore = new Semaphore(0);
    receiver.receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(r -> receivedMessages.add(r))
            .doOnError(e -> errorSemaphore.release())
            .subscribe();
    assertTrue("Flux did not fail", errorSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    assertTrue("Commit failure did not fail flux", receivedMessages.size() < count);
}
/**
 * Tests that the inbound flux is terminated with an error if a commit fails
 * with a non-retriable error.
 */
@Test
public void autoAckCommitFatalError() throws Exception {
    consumer.addCommitException(new InvalidOffsetException("invalid offset"), 1);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    int count = 100;
    sendMessages(topic, 0, count);
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Semaphore errorSemaphore = new Semaphore(0);
    receiver.receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(r -> receivedMessages.add(r))
            .doOnError(e -> errorSemaphore.release())
            .subscribe();
    assertTrue("Flux did not fail", errorSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    assertTrue("Commit failure did not fail flux", receivedMessages.size() < count);
}
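// The two failure tests above differ only in the injected exception type. A purely
// illustrative sketch (not the library's actual internals) of the distinction they
// rely on; retryCommit and failFlux are hypothetical names:
if (exception instanceof RetriableCommitFailedException && attempts < maxCommitAttempts) {
    retryCommit();        // transient error: schedule another commit attempt
} else {
    failFlux(exception);  // fatal error or attempts exhausted: terminate the inbound flux
}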
@Test
public void manualAssignmentWithCommit() throws Exception {
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .delayUntil(r -> r.receiverOffset().commit())
            .doOnSubscribe(s -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}
@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(1)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    // retry() resubscribes to the commit Mono until the two injected failures are exhausted
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
/**
 * Tests that commits are retried if the failure is transient and the manual commit Mono
 * is not failed if the commit succeeds within the configured number of attempts.
 */
@Test
public void manualCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(10)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
/**
 * Tests that the manual commit Mono fails if commits do not succeed after a transient error
 * within the configured number of attempts.
 */
@Test
public void manualCommitFailure() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 10);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(2)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveVerifyError(RetriableCommitFailedException.class, record ->
            record.receiverOffset().commit().retry(5).then(Mono.just(record)));
}
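// The failing case composes the receiver's internal attempt limit (maxCommitAttempts)
// with an explicit retry(5) on the commit Mono; each resubscription re-runs the commit.
// A hedged alternative sketch using Reactor's standard bounded backoff instead of a
// bare retry(5); reactor.util.retry.Retry is a reactor-core API, and its use here is
// illustrative rather than taken from these tests:
record.receiverOffset()
      .commit()
      .retryWhen(Retry.backoff(5, Duration.ofMillis(100)))
      .then(Mono.just(record));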
/**
 * Tests that all acknowledged records are committed on close.
 */
@Test
public void autoCommitClose() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(100)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            r.receiverOffset().acknowledge();
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records and does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver()
            .receiveAutoAck()
            .concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
@Test
public void manualCommitRecordAsync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    receiverOptions = receiverOptions
            .commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnNext(record -> record.receiverOffset()
                    .commit()
                    .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                    .doOnError(e -> log.error("Commit exception", e))
                    .subscribe());

    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(0, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
ImmutableReceiverOptions(ReceiverOptions<K, V> options) {
    this(
        options.consumerProperties(),
        options.assignListeners(),
        options.revokeListeners(),
        options.keyDeserializer(),
        options.valueDeserializer(),
        options.pollTimeout(),
        options.closeTimeout(),
        options.commitInterval(),
        options.commitBatchSize(),
        options.atmostOnceCommitAheadSize(),
        options.maxCommitAttempts(),
        options.subscriptionTopics(),
        options.assignment(),
        options.subscriptionPattern(),
        options.schedulerSupplier()
    );
}
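// Every setting is copied into the new instance by this constructor, which is what
// lets the tests reassign receiverOptions = receiverOptions.commitBatchSize(0)...
// without mutating shared state. A minimal usage sketch; consumerProps is an assumed
// Map of consumer configs, and the constructor's visibility from this call site is
// also an assumption:
ReceiverOptions<Integer, String> base = ReceiverOptions.create(consumerProps);
ReceiverOptions<Integer, String> perTest = new ImmutableReceiverOptions<>(base)
        .commitInterval(Duration.ZERO)
        .commitBatchSize(0);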
/**
 * Tests that offsets that are not committed explicitly are not committed
 * on close and that uncommitted records are redelivered on the next receive.
 */
@Test
public void manualCommitClose() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ZERO)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            return r.receiverOffset().commit().then(Mono.just(r));
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions]; // a new long[] is already zero-initialized
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .delayUntil(record -> {
                assertEquals(committedOffsets[record.partition()], record.offset());
                return record.receiverOffset().commit()
                        .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets));
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));

    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
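// For contrast with manualCommitRecordAsync earlier in this section: delayUntil holds
// each record until its commit Mono completes, while the async variant subscribes to
// the commit and moves on. Both operator patterns are taken from the tests above:
receiver.receive()
        .delayUntil(record -> record.receiverOffset().commit());           // synchronous per record

receiver.receive()
        .doOnNext(record -> record.receiverOffset().commit().subscribe()); // fire-and-observe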
/**
 * Tests that retriable commit exceptions are retried with {@link KafkaReceiver#receiveAutoAck()}.
 */
@Test
public void autoAckCommitTransientError() {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 3);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                    .receiveAutoAck()
                    .concatMap(r -> r);
    verifyMessages(inboundFlux.take(11), 11);
    receivedMessages.removeIf(r -> r.offset() >= 5);
    // Last record should not be committed
    verifyCommits(groupId, topic, 10);
}