ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions =
        ReceiverOptions.<byte[], byte[]>create(consumerProps)
                       .addAssignListener(partitions -> {
                           if (assignSemaphore.availablePermits() == 0) {
                               partitions.forEach(p -> p.seekToEnd());
                               assignSemaphore.release();
                           }
                       })
                       .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions)
                        .receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}

public void initialize() {
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
                                     .commitBatchSize(10)
                                     .commitInterval(Duration.ofMillis(60000))
                                     .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> {
            // Acknowledge only the first 10 records received on each partition
            if (receivedMessages.get(record.partition()).size() < 10)
                record.receiverOffset().acknowledge();
        });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, but does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver()
        .receiveAutoAck()
        .concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
public TestableReceiver createTestFlux() {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = receiver.receive();
    return new TestableReceiver(receiver, kafkaFlux);
}
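Nearly every test in this section calls a shared createReceiver() helper whose body is not shown here. A minimal sketch of what it presumably does, assuming receiverOptions already carries the test's consumer properties, topic names the test topic, and onPartitionsAssigned is the assign listener sketched further below:

// Hypothetical sketch of the createReceiver() helper assumed by these tests:
// subscribe the shared options to the test topic and build a receiver from them.
private KafkaReceiver<Integer, String> createReceiver() {
    ReceiverOptions<Integer, String> options =
        receiverOptions.addAssignListener(this::onPartitionsAssigned)
                       .subscription(Collections.singleton(topic));
    return KafkaReceiver.create(options);
}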
public Flux<?> flux() {
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
                        .receive()
                        .groupBy(m -> m.receiverOffset().topicPartition())
                        .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
                                                               .map(r -> processRecord(partitionFlux.key(), r))
                                                               .sample(Duration.ofMillis(5000))
                                                               .concatMap(offset -> offset.commit()))
                        .doOnCancel(() -> close());
}

public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
@Test
public void sendReceive() throws Exception {
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
        .receive();
    sendReceive(kafkaFlux, 0, 100, 0, 100);
}
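The sendReceive(flux, sendStartIndex, sendCount, receiveStartIndex, receiveCount) helper used throughout is also not shown; a rough sketch of its presumed shape, built from the subscribe(), sendMessages(), waitForMessages() and checkConsumedMessages() helpers that appear elsewhere in these tests:

// Hypothetical sketch of the sendReceive() test helper: subscribe the flux,
// send a range of messages, then wait for and verify the expected range.
private void sendReceive(Flux<? extends ConsumerRecord<Integer, String>> flux,
        int sendStartIndex, int sendCount,
        int receiveStartIndex, int receiveCount) throws Exception {
    CountDownLatch latch = new CountDownLatch(receiveCount);
    subscribe(flux, latch);                  // see the subscribe() sketch after the read_uncommitted test
    sendMessages(sendStartIndex, sendCount);
    waitForMessages(latch);
    checkConsumedMessages(receiveStartIndex, receiveCount);
}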
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
                        .receive()
                        .map(m -> SenderRecord.create(transform(m.value()), m.receiverOffset()))
                        .as(sender::send)
                        .doOnNext(m -> m.correlationMetadata().acknowledge())
                        .doOnCancel(() -> close());
}

public ProducerRecord<Integer, Person> transform(Person p) {
@Test
public void manualAssignment() throws Exception {
    receiverOptions = receiverOptions
        .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive()
        .doOnSubscribe(s -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}
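A minimal sketch of the assumed getTopicPartitions() helper, which presumably enumerates every partition of the test topic (partitions being the test's partition count):

// Hypothetical sketch: build the full TopicPartition list for the test topic.
private Collection<TopicPartition> getTopicPartitions() {
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (int i = 0; i < partitions; i++)
        topicPartitions.add(new TopicPartition(topic, i));
    return topicPartitions;
}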
@Test
public void wildcardSubscribe() throws Exception {
    receiverOptions = receiverOptions
        .addAssignListener(this::onPartitionsAssigned)
        .subscription(Pattern.compile("test.*"));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive();
    sendReceive(kafkaFlux, 0, 10, 0, 10);
}
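Several tests register this::onPartitionsAssigned as the assign listener. Judging from the inline listener in the latency-test constructor above, it presumably just signals the semaphore once the rebalance completes:

// Hypothetical sketch: unblock waiting test code once partitions are assigned.
private void onPartitionsAssigned(Collection<ReceiverPartition> partitions) {
    assignSemaphore.release();
}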
public Flux<?> flux() {
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
                        .receive()
                        .publishOn(scheduler)
                        .concatMap(m -> storeInDB(m.value())
                                            .thenEmpty(m.receiverOffset().commit()))
                        .retry()
                        .doOnCancel(() -> close());
}

public Mono<Void> storeInDB(Person person) {
@Test
public void seekToBeginning() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
        .addAssignListener(this::seekToBeginning)
        .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive();
    sendReceive(kafkaFlux, count, count, 0, count * 2);
}
@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
                                     .commitBatchSize(Integer.MAX_VALUE)
                                     .commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}
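restartAndCheck(receiver, sendStartIndex, sendCount, maxRedelivered) is another shared helper whose body is not shown. A speculative sketch of its contract, assuming the cancelSubscriptions(), clearReceivedMessages() and sendReceiveWithRedelivery() helpers used elsewhere in these tests:

// Hypothetical sketch of restartAndCheck(): tear down the current subscription,
// consume the same range again with a fresh receiver, and allow at most
// maxRedelivered already-processed records to be redelivered.
private void restartAndCheck(KafkaReceiver<Integer, String> receiver,
        int sendStartIndex, int sendCount, int maxRedelivered) throws Exception {
    Thread.sleep(500); // give pending async commits a chance to complete
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver().receive();
    sendReceiveWithRedelivery(kafkaFlux, sendStartIndex, sendCount, 0, maxRedelivered);
    clearReceivedMessages();
}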
@Test
public void atleastOnceCommitRecord() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
                                     .commitBatchSize(1)
                                     .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most one record may be redelivered
    restartAndCheck(receiver, 100, 100, 1);
}
@Test
public void seekToEnd() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
        .addAssignListener(partitions -> {
            for (ReceiverPartition partition : partitions)
                partition.seekToEnd();
            onPartitionsAssigned(partitions);
        })
        .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive();
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(100), count, count);
}
@Test
public void manualAssignmentWithCommit() throws Exception {
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO)
                                     .commitBatchSize(0)
                                     .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive()
        .delayUntil(r -> r.receiverOffset().commit())
        .doOnSubscribe(s -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}
@Test
public void sendTransactionalReadCommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(createReceiver().receive(), latch);
    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.sendTransactionally(Flux.just(createSenderRecords(0, count, true)))
            .doOnNext(result -> assertEquals(count, latch.getCount()))
            .blockLast(Duration.ofMillis(receiveTimeoutMillis));
    waitForMessages(latch);
    checkConsumedMessages(0, count);
}
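A plausible sketch of the assumed createTransactionalSender() helper: an ordinary KafkaSender whose underlying producer is given a transactional id, assuming a senderOptions field holds the test's producer configuration (the id value here is illustrative):

// Hypothetical sketch: build a sender with Kafka transactions enabled.
private KafkaSender<Integer, String> createTransactionalSender() {
    SenderOptions<Integer, String> txSenderOptions =
        senderOptions.producerProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-tx-id");
    return KafkaSender.create(txSenderOptions);
}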
@Test
public void sendTransactionalReadUncommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
    int count = 100;
    CountDownLatch latch1 = new CountDownLatch(count);
    CountDownLatch latch2 = new CountDownLatch(count * 2);
    CountDownLatch latch3 = new CountDownLatch(count * 3);
    subscribe(createReceiver().receive(), latch1, latch2, latch3);

    sendMessages(0, count);
    waitForMessages(latch1); // non-transactional messages received

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.sendTransactionally(Flux.just(createSenderRecords(count, count, true)))
            .then().block(Duration.ofMillis(receiveTimeoutMillis));
    waitForMessages(latch2); // transactional messages received before commit

    sendMessages(count * 2, count);
    waitForMessages(latch3);
    checkConsumedMessages(0, count * 3);
}
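The subscribe(flux, latches...) helper referenced here and in the sendReceive() sketch is assumed to record each received message and count down every latch per record; roughly, and hypothetically:

// Hypothetical sketch: subscribe the flux, record each message, count down all latches.
private void subscribe(Flux<? extends ConsumerRecord<Integer, String>> flux, CountDownLatch... latches)
        throws Exception {
    Disposable disposable = flux.subscribe(record -> {
        onReceive(record);                   // assumed helper: records the message for later assertions
        for (CountDownLatch latch : latches)
            latch.countDown();
    });
    subscribeDisposables.add(disposable);    // assumed: tracked so cancelSubscriptions() can dispose it
    assertTrue(assignSemaphore.tryAcquire(30, TimeUnit.SECONDS)); // wait for partition assignment
}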
@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
                                     .commitBatchSize(10)
                                     .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most commitBatchSize records may be redelivered
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}
@Test
public void autoCommitFailurePropagationAfterRetries() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
                                     .maxCommitAttempts(2);
    testAutoCommitFailureScenarios(true, count, 2, 0, Integer.MAX_VALUE);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receive();
    sendReceiveWithRedelivery(flux, count, count, 2, 5);
}
@Test
public void seekToOffset() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
        .addAssignListener(partitions -> {
            onPartitionsAssigned(partitions);
            for (ReceiverPartition partition : partitions)
                partition.seek(1);
        })
        .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive()
        .doOnError(e -> log.error("KafkaFlux exception", e));
    sendReceive(kafkaFlux, count, count, partitions, count * 2 - partitions);
}
@Test
public void brokerRestart() throws Exception {
    int sendBatchSize = 10;
    receiverOptions = receiverOptions.maxCommitAttempts(1000);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
        .receive()
        .doOnError(e -> log.error("KafkaFlux exception", e));
    CountDownLatch receiveLatch = new CountDownLatch(sendBatchSize * 2);
    subscribe(kafkaFlux, receiveLatch);
    sendMessagesSync(0, sendBatchSize);

    shutdownKafkaBroker();
    TestUtils.sleep(5000);
    restartKafkaBroker();

    sendMessagesSync(sendBatchSize, sendBatchSize);
    waitForMessages(receiveLatch);
    checkConsumedMessages();
}
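For this test to pass, sendMessagesSync must keep retrying while the broker is down. A hedged sketch of one way such a helper could be written with the reactor-kafka sender API (the retry bounds and message format are illustrative assumptions):

// Hypothetical sketch: send each message and block for its ack, retrying with a
// fixed delay while the broker is unavailable so sends survive the restart window.
private void sendMessagesSync(int startIndex, int count) {
    Flux.range(startIndex, count)
        .map(i -> SenderRecord.create(new ProducerRecord<>(topic, i, "Message " + i), i))
        .concatMap(record -> sender.send(Mono.just(record))
                                   .retryWhen(Retry.fixedDelay(100, Duration.ofMillis(500))))
        .blockLast(Duration.ofSeconds(60));
}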