@Test
public void sendReceive() throws Exception {
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver().receive();
    sendReceive(kafkaFlux, 0, 100, 0, 100);
}
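// These tests rely on helpers defined elsewhere in the test class (createReceiver,
// sendReceive, sendMessages, restartAndCheck, onPartitionsAssigned, and so on).
// A minimal sketch of what createReceiver() presumably does, assuming
// receiverOptions and topic are fields initialized in test setup (an
// illustrative reconstruction, not necessarily the actual implementation):
//
//     private KafkaReceiver<Integer, String> createReceiver() {
//         receiverOptions = receiverOptions
//                 .addAssignListener(this::onPartitionsAssigned)
//                 .subscription(Collections.singletonList(topic));
//         return KafkaReceiver.create(receiverOptions);
//     }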
@Test
public void atmostOnce() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAtmostOnce();

    sendReceive(kafkaFlux, 0, 10, 0, 10);

    // Second consumer should receive only new messages even though the first
    // one was not closed gracefully
    restartAndCheck(receiver, 10, 10, 0);
}
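// receiveAtmostOnce() commits each record's offset before the record is emitted
// downstream, so an ungraceful shutdown can drop in-flight records but never
// redelivers already-processed ones; that is why zero redeliveries are expected
// here even without a clean close.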
@Test
public void wildcardSubscribe() throws Exception {
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Pattern.compile("test.*"));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceive(kafkaFlux, 0, 10, 0, 10);
}
@Test
public void seekToBeginning() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    // The count messages sent before the receiver started must also be received,
    // since each partition is rewound to the beginning on assignment
    sendReceive(kafkaFlux, count, count, 0, count * 2);
}
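// The seekToBeginning assign listener referenced above presumably rewinds each
// assigned partition using the ReceiverPartition API; a sketch under that
// assumption (not necessarily the actual helper):
//
//     private void seekToBeginning(Collection<ReceiverPartition> partitions) {
//         for (ReceiverPartition partition : partitions)
//             partition.seekToBeginning();
//     }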
@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(Integer.MAX_VALUE)
            .commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // With commitBatchSize set to Integer.MAX_VALUE, commits are triggered only
    // by the periodic interval; wait for the 1s interval to elapse so the final
    // offsets are committed before the restart, leaving nothing to redeliver
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}
@Test
public void autoCommitRetry() throws Exception {
    int count = 5;
    testAutoCommitFailureScenarios(true, count, 10, 0, 2);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver()
            .receiveAutoAck()
            .concatMap(r -> r);
    sendReceive(flux, count, count, count, count);
}
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> {
                // Acknowledge only the first 10 records on each partition
                if (receivedMessages.get(record.partition()).size() < 10)
                    record.receiverOffset().acknowledge();
            });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits acknowledged records but does not commit
    // unacknowledged ones
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver()
            .receiveAutoAck()
            .concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
@Test
public void autoAck() throws Exception {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck()
            .concatMap(r -> r); // flatten the per-poll batches in order
    sendReceive(kafkaFlux, 0, 100, 0, 100);
    waitForCommits(receiver, 100);

    // Close the consumer and create another one. The first consumer should
    // commit its final offset on close; the second should receive only new
    // messages.
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver()
            .receiveAutoAck()
            .concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 100, 100);
}
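// receiveAutoAck() returns a Flux of record batches, one inner Flux per
// consumer poll; the records in each batch are acknowledged automatically when
// that inner Flux terminates, and acknowledged offsets are then committed
// periodically according to commitInterval/commitBatchSize.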
@Test
public void atleastOnceCommitRecord() throws Exception {
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(1)
            .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most one record may be redelivered
    restartAndCheck(receiver, 100, 100, 1);
}
@Test
public void seekToOffset() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                onPartitionsAssigned(partitions);
                for (ReceiverPartition partition : partitions)
                    partition.seek(1);
            })
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnError(e -> log.error("KafkaFlux exception", e));
    // Seeking to offset 1 skips the first record on each partition
    sendReceive(kafkaFlux, count, count, partitions, count * 2 - partitions);
}
@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most commitBatchSize records may be redelivered
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}
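// Taken together, the three atleastOnce* tests exercise the commit strategies
// exposed through ReceiverOptions: commitInterval(Duration) for time-based
// commits, commitBatchSize(int) for size-based commits, and commitBatchSize(1)
// for a commit per acknowledged record. The corresponding redelivery bounds
// after an abrupt restart are 0 (once the interval has fired), commitBatchSize,
// and 1, respectively.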