/**
 * Builds a {@link TestableReceiver} that wraps a freshly created receiver
 * together with the record flux obtained from its {@code receive()} call.
 */
public TestableReceiver createTestFlux() {
    KafkaReceiver<Integer, String> testReceiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> records = testReceiver.receive();
    return new TestableReceiver(testReceiver, records);
}
/** Basic end-to-end check: 100 sent messages are all delivered through {@code receive()}. */
@Test
public void sendReceive() throws Exception {
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux = createReceiver().receive();
    sendReceive(inboundFlux, 0, 100, 0, 100);
}
/**
 * Exercises manual-commit retry behaviour: commits are forced to fail
 * {@code failureCount} times (retriable or not, per the flag) before succeeding.
 * Semaphores signal record receipt, commit success, and each commit failure.
 */
private void testManualCommitRetry(boolean retriableException) throws Exception {
    int count = 1;
    int failureCount = 2;
    // 1 - count == 0 initial permits; one permit is released per received record
    Semaphore receiveSemaphore = new Semaphore(1 - count);
    Semaphore commitSuccessSemaphore = new Semaphore(0);
    Semaphore commitFailureSemaphore = new Semaphore(0);
    // Disable interval- and batch-based commits so only the explicit manual commit runs
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    TestableReceiver testableReceiver = new TestableReceiver(receiver);
    Flux<? extends ConsumerRecord<Integer, String>> flux = testableReceiver
            .receiveWithManualCommitFailures(retriableException, failureCount,
                    receiveSemaphore, commitSuccessSemaphore, commitFailureSemaphore);

    subscribe(flux, new CountDownLatch(count));
    sendMessages(1, count);
    assertTrue("Did not receive messages",
            receiveSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertTrue("Commit did not succeed after retry",
            commitSuccessSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    // Every injected failure must have been observed before the eventual success
    assertEquals(failureCount, commitFailureSemaphore.availablePermits());
}
@Test public void atmostOnce() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAtmostOnce(); sendReceive(kafkaFlux, 0, 10, 0, 10); // Second consumer should receive only new messages even though first one was not closed gracefully restartAndCheck(receiver, 10, 10, 0); }
/**
 * At-least-once with interval-based commits only: batch size is effectively disabled
 * (MAX_VALUE) so commits happen on the 1s interval. After sleeping past the interval,
 * a restarted consumer must see no redelivered records.
 */
@Test
public void atleastOnceCommitInterval() throws Exception {
    // Fix: reassign the result of the option mutators, matching the style used by
    // the other tests in this class; the updated options were previously discarded.
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(Integer.MAX_VALUE)
            .commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Wait longer than the commit interval so all acknowledged offsets are committed
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}
/**
 * Transient auto-commit failures are retried; once they succeed, offsets are
 * committed and a new consumer resumes exactly after the already-consumed records.
 */
@Test
public void autoCommitRetry() throws Exception {
    int count = 5;
    testAutoCommitFailureScenarios(true, count, 10, 0, 2);

    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            createReceiver().receiveAutoAck().concatMap(batch -> batch);
    sendReceive(inboundFlux, count, count, count, count);
}
@Test public void autoAck() throws Exception { KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r); sendReceive(kafkaFlux, 0, 100, 0, 100); waitForCommits(receiver, 100); // Close consumer and create another one. First consumer should commit final offset on close. // Second consumer should receive only new messages. cancelSubscriptions(true); clearReceivedMessages(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r); sendReceive(kafkaFlux2, 100, 100, 100, 100); }
@Test public void atleastOnceCommitRecord() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); receiverOptions.commitBatchSize(1); receiverOptions.commitInterval(Duration.ofMillis(60000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> record.receiverOffset().acknowledge()); sendReceive(fluxWithAck, 0, 100, 0, 100); // Atmost one record may be redelivered restartAndCheck(receiver, 100, 100, 1); }
/**
 * Verifies that closing the receiver commits acknowledged offsets but NOT
 * unacknowledged ones: only the first 10 records per partition are ack'ed,
 * so a second consumer re-receives everything after those.
 */
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    // Acknowledge only the first 10 records seen on each partition
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 =
            createReceiver().receiveAutoAck().concatMap(r -> r);
    // 10 * partitions records were committed; the remaining 200 - (10 * partitions)
    // of the original 200 are redelivered to the new consumer
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
/**
 * Abruptly terminates the given receiver (no graceful close), then starts a fresh
 * at-most-once consumer and verifies it sees the expected new messages with at most
 * {@code maxRedelivered} redeliveries of previously consumed records.
 */
private void restartAndCheck(KafkaReceiver<Integer, String> receiver,
        int sendStartIndex, int sendCount, int maxRedelivered) throws Exception {
    Thread.sleep(500); // Give a little time for commits to complete before terminating abruptly
    new TestableReceiver(receiver).terminate();
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAtmostOnce();
    sendReceiveWithRedelivery(kafkaFlux2, sendStartIndex, sendCount, 0, maxRedelivered);
    clearReceivedMessages();
    cancelSubscriptions(false);
}
@Test public void atleastOnceCommitBatchSize() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); receiverOptions.commitBatchSize(10); receiverOptions.commitInterval(Duration.ofMillis(60000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> record.receiverOffset().acknowledge()); sendReceive(fluxWithAck, 0, 100, 0, 100); /// Atmost batchSize records may be redelivered restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize()); }
/**
 * Non-retriable auto-commit failures are not retried, so some offsets stay
 * uncommitted and a restarted consumer redelivers part of the earlier records.
 */
@Test
public void autoCommitNonRetriableException() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    testAutoCommitFailureScenarios(false, count, 2, 0, 10);

    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            createReceiver().receiveAutoAck().concatMap(batch -> batch);
    sendReceiveWithRedelivery(inboundFlux, count, count, 3, 5);
}
/**
 * A read_committed consumer must not see transactional records until the
 * transaction commits; afterwards it receives all of them.
 */
@Test
public void sendTransactionalReadCommitted() throws Exception {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(createReceiver().receive(), latch);

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.sendTransactionally(Flux.just(createSenderRecords(0, count, true)))
            // Nothing should have been delivered to the consumer before the commit
            .doOnNext(result -> assertEquals(count, latch.getCount()))
            // Fix: receiveTimeoutMillis is a millisecond value; Duration.ofSeconds
            // inflated the timeout by a factor of 1000.
            .blockLast(Duration.ofMillis(receiveTimeoutMillis));
    waitForMessages(latch);
    checkConsumedMessages(0, count);
}
@Test public void sendTransactionalReadUncommitted() throws Exception { receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); int count = 100; CountDownLatch latch1 = new CountDownLatch(count); CountDownLatch latch2 = new CountDownLatch(count * 2); CountDownLatch latch3 = new CountDownLatch(count * 3); subscribe(createReceiver().receive(), latch1, latch2, latch3); sendMessages(0, count); waitForMessages(latch1); // non-transactional messages received KafkaSender<Integer, String> txSender = createTransactionalSender(); txSender.sendTransactionally(Flux.just(createSenderRecords(count, count, true))) .then().block(Duration.ofSeconds(receiveTimeoutMillis)); waitForMessages(latch2); // transactional messages received before commit sendMessages(count * 2, count); waitForMessages(latch3); checkConsumedMessages(0, count * 3); }
/**
 * When commit retries are exhausted (maxCommitAttempts=2), the failure propagates
 * and uncommitted records are redelivered to a subsequent consumer.
 */
@Test
public void autoCommitFailurePropagationAfterRetries() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .maxCommitAttempts(2);
    testAutoCommitFailureScenarios(true, count, 2, 0, Integer.MAX_VALUE);

    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux = createReceiver().receive();
    sendReceiveWithRedelivery(inboundFlux, count, count, 2, 5);
}
/**
 * With auto.offset.reset=latest, records produced before partition assignment are
 * skipped; only messages sent after assignment are received.
 */
@Test
public void offsetResetLatest() throws Exception {
    int count = 10;
    sendMessages(0, count); // produced before subscription — must NOT be received
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> assignSemaphore.release());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
            .receive()
            .doOnNext(record -> onReceive(record));
    StepVerifier.create(kafkaFlux)
            .then(() -> assignSemaphore.acquireUninterruptibly())
            .expectNoEvent(Duration.ofMillis(100))
            .then(() -> sendMessages(count, count))
            .expectNextCount(count)
            .thenCancel()
            // Fix: receiveTimeoutMillis is a millisecond value; Duration.ofSeconds
            // inflated the verification timeout by a factor of 1000.
            .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkConsumedMessages(count, count);
}
/**
 * Verifies the receiver survives a broker outage: messages sent before shutdown and
 * after restart are all eventually delivered. maxCommitAttempts is raised so commit
 * retries during the outage do not fail the flux.
 */
@Test
public void brokerRestart() throws Exception {
    int sendBatchSize = 10;
    // Allow many commit retries while the broker is down
    receiverOptions = receiverOptions.maxCommitAttempts(1000);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
            .receive()
            .doOnError(e -> log.error("KafkaFlux exception", e));

    CountDownLatch receiveLatch = new CountDownLatch(sendBatchSize * 2);
    subscribe(kafkaFlux, receiveLatch);
    sendMessagesSync(0, sendBatchSize);
    shutdownKafkaBroker();
    TestUtils.sleep(5000); // let the outage be observed before bringing the broker back
    restartKafkaBroker();
    sendMessagesSync(sendBatchSize, sendBatchSize);
    waitForMessages(receiveLatch);
    checkConsumedMessages();
}
@Test public void sendNonTransactionalReadCommitted() throws Exception { receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); int count = 100; CountDownLatch latch1 = new CountDownLatch(count); CountDownLatch latch2 = new CountDownLatch(count * 3); subscribe(createReceiver().receive(), latch1, latch2); sendMessages(0, count); waitForMessages(latch1); // non-transactional messages received if no commits pending checkConsumedMessages(0, count); KafkaSender<Integer, String> txSender = createTransactionalSender(); TransactionManager txn = txSender.transactionManager(); txn.begin() .thenMany(txSender.send(createSenderRecords(count, count, true))) .blockLast(Duration.ofSeconds(receiveTimeoutMillis)); sendMessages(count * 2, count); Thread.sleep(1000); assertEquals(count * 2, latch2.getCount()); // non-transactional and transactional messages not received while commit pending txn.commit().subscribe(); waitForMessages(latch2); checkConsumedMessages(0, count * 3); }
/**
 * Manual synchronous commits: each record's offset is committed (and awaited via
 * delayUntil) before the next record on that partition is processed, so every
 * record's offset equals the last committed offset for its partition.
 */
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    // Fix: dropped the explicit zeroing loop — Java zero-initializes new arrays.
    long[] committedOffsets = new long[partitions];
    // Disable interval/batch commits so only the explicit commit() calls run
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .delayUntil(record -> {
                // The previous commit must be complete before this record arrives
                assertEquals(committedOffsets[record.partition()], record.offset());
                return record.receiverOffset().commit()
                        .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets));
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));

    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
@Test public void abortTransaction() throws Exception { receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver().receive(); int count = 100; CountDownLatch latch1 = new CountDownLatch(count); CountDownLatch latch2 = new CountDownLatch(count * 2); subscribe(kafkaFlux, latch1, latch2); KafkaSender<Integer, String> txSender = createTransactionalSender(); txSender.transactionManager().begin() .thenMany(txSender.send(createSenderRecords(0, count, false))) .then(txSender.transactionManager().abort()) .then().block(Duration.ofSeconds(receiveTimeoutMillis)); sendMessages(count, count); waitForMessages(latch1); // non-transactional messages received if no commits pending checkConsumedMessages(count, count); txSender.sendTransactionally(Flux.just(createSenderRecords(count * 2, count, true))) .then().subscribe(); waitForMessages(latch2); checkConsumedMessages(count, count * 3); }