ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions =
        ReceiverOptions.<byte[], byte[]>create(consumerProps)
            .addAssignListener(partitions -> {
                if (assignSemaphore.availablePermits() == 0) {
                    partitions.forEach(p -> p.seekToEnd());
                    assignSemaphore.release();
                }
            })
            .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions).receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}

public void initialize() {
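    // The original body is truncated at this point. A minimal sketch, assuming
    // initialize() simply starts consuming and feeds the queue that the latency
    // measurement loop polls; the `consumeDisposable` field is hypothetical:
    consumeDisposable = flux.subscribe(record -> receiveQueue.offer(record));
}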
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
            .receiveAtmostOnce()
            .map(cr -> SenderRecord.create(transform(cr.value()), cr.offset()))
            .as(sender::send)
            .doOnCancel(() -> close());
}

public ProducerRecord<Integer, Person> transform(Person p) {
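    // The original body is truncated at this point. A plausible sketch, assuming
    // the record is republished to a `destTopic` field keyed by person id; both
    // are assumptions, though Person.id() is used elsewhere in these snippets:
    return new ProducerRecord<>(destTopic, p.id(), p);
}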
public Flux<?> flux() {
    sender = sender(senderOptions());
    EmitterProcessor<Person> processor = EmitterProcessor.create();
    FluxSink<Person> incoming = processor.sink();
    Flux<?> inFlux = KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
            .receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(m -> incoming.next(m.value()));
    Flux<SenderResult<Integer>> stream1 = sender.send(processor.publishOn(scheduler1)
            .map(p -> SenderRecord.create(process1(p, true), p.id())));
    Flux<SenderResult<Integer>> stream2 = sender.send(processor.publishOn(scheduler2)
            .map(p -> SenderRecord.create(process2(p, true), p.id())));
    AtomicReference<Disposable> cancelRef = new AtomicReference<>();
    Consumer<AtomicReference<Disposable>> cancel = cr -> {
        Disposable c = cr.getAndSet(null);
        if (c != null)
            c.dispose();
    };
    return Flux.merge(stream1, stream2)
               .doOnSubscribe(s -> cancelRef.set(inFlux.subscribe()))
               .doOnCancel(() -> {
                   cancel.accept(cancelRef);
                   close();
               })
               .doOnTerminate(() -> close());
}

public ProducerRecord<Integer, Person> process1(Person p, boolean debug) {
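    // The original body is truncated at this point. A sketch mirroring the
    // transform() shape above; the `destTopic1` field and the debug logging
    // are assumptions:
    if (debug)
        System.out.println("Processing person " + p.id() + " on stream 1");
    return new ProducerRecord<>(destTopic1, p.id(), p);
}

A side note on the API used here: EmitterProcessor and the FluxSink obtained via sink() are deprecated as of Reactor 3.4; Sinks.many().multicast().onBackpressureBuffer() is the modern equivalent for this kind of fan-out pipe.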
@Override
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    ReceiverOptions<Integer, Person> receiverOptions = receiverOptions(Collections.singleton(sourceTopic));
    KafkaReceiver<Integer, Person> receiver = KafkaReceiver.create(receiverOptions);
    return receiver.receiveExactlyOnce(sender.transactionManager())
                   .concatMap(f -> sendAndCommit(f))
                   .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
                   .doOnCancel(() -> close());
}

private Flux<SenderResult<Integer>> sendAndCommit(Flux<ConsumerRecord<Integer, Person>> flux) {
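    // The original body is truncated at this point. A sketch following the
    // send-then-commit pattern of receiveAndSendTransactions() further below,
    // assuming `sender` is also held in a field (the local variable in flux()
    // would otherwise not be visible here) and that records are transformed
    // and keyed by the consumed record's key:
    return sender.send(flux.map(cr -> SenderRecord.create(transform(cr.value()), cr.key())))
                 .concatWith(sender.transactionManager().commit());
}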
public TestableReceiver createTestFlux() {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = receiver.receive();
    return new TestableReceiver(receiver, kafkaFlux);
}
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
                                     .commitBatchSize(10)
                                     .commitInterval(Duration.ofMillis(60000))
                                     .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, but does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
public KafkaReceiver<Integer, String> createReceiver() {
    receiverOptions = receiverOptions.addAssignListener(this::onPartitionsAssigned)
                                     .subscription(Collections.singletonList(topic));
    return KafkaReceiver.create(receiverOptions);
}
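onPartitionsAssigned is referenced here but not shown. Judging from the inline assign listener in consumerClose() further below, it most likely just signals that the rebalance has completed; a minimal sketch under that assumption:

private void onPartitionsAssigned(Collection<ReceiverPartition> partitions) {
    // Release the semaphore that tests block on while waiting for partition assignment.
    assignSemaphore.release();
}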
private Flux<SenderResult<Integer>> receiveAndSendTransactions(KafkaReceiver<Integer, String> receiver,
        KafkaSender<Integer, String> sender, String destTopic, int count, int exceptionIndex) {
    AtomicInteger index = new AtomicInteger();
    TransactionManager transactionManager = sender.transactionManager();
    return receiver.receiveExactlyOnce(transactionManager)
            .concatMap(f -> sender.send(f.map(r -> toSenderRecord(destTopic, r, r.key()))
                                         .doOnNext(r -> {
                                             if (index.incrementAndGet() == exceptionIndex)
                                                 throw new RuntimeException("Test exception");
                                         }))
                                  .concatWith(transactionManager.commit()))
            .take(count);
}
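toSenderRecord is not defined in these snippets. A plausible sketch, assuming it copies the consumed record to the destination topic and propagates the key as correlation metadata, which matches the Flux<SenderResult<Integer>> return type above:

private SenderRecord<Integer, String, Integer> toSenderRecord(String destTopic,
        ConsumerRecord<Integer, String> record, Integer correlationMetadata) {
    return SenderRecord.create(new ProducerRecord<>(destTopic, record.key(), record.value()), correlationMetadata);
}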
@Test
public void autoCommitRetry() throws Exception {
    int count = 5;
    testAutoCommitFailureScenarios(true, count, 10, 0, 2);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(flux, count, count, count, count);
}
@Test
public void atmostOnce() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAtmostOnce();

    sendReceive(kafkaFlux, 0, 10, 0, 10);

    // The second consumer should receive only new messages even though the first one was not closed gracefully
    restartAndCheck(receiver, 10, 10, 0);
}
private long committedCount(KafkaReceiver<Integer, String> receiver) {
    long committed = 0;
    for (int j = 0; j < partitions; j++) {
        TopicPartition p = new TopicPartition(topic, j);
        OffsetAndMetadata offset = receiver.doOnConsumer(c -> c.committed(p))
                                           .block(Duration.ofMillis(receiveTimeoutMillis)); // was ofSeconds; the field is in milliseconds
        if (offset != null && offset.offset() > 0)
            committed += offset.offset();
    }
    return committed;
}
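waitForCommits(), used by autoAck() and consumerClose() below, is not shown. A minimal polling sketch built on committedCount(), under the assumption that it simply blocks until the expected offsets are committed or the receive timeout elapses:

private void waitForCommits(KafkaReceiver<Integer, String> receiver, long expectedCount) throws InterruptedException {
    long deadline = System.currentTimeMillis() + receiveTimeoutMillis;
    while (committedCount(receiver) < expectedCount) {
        if (System.currentTimeMillis() > deadline)
            throw new AssertionError("Commits did not reach " + expectedCount + " within the timeout");
        Thread.sleep(100);
    }
}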
@Test
public void sendReceive() throws Exception {
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver().receive();
    sendReceive(kafkaFlux, 0, 100, 0, 100);
}
/**
 * Tests transaction abort with messages to multiple partitions as well as offset commits
 * included within each transaction.
 */
@Test
public void transactionAbort() throws Exception {
    int count = 30;
    sendMessages(srcTopic, 0, count);
    TransactionManager transactionManager = sender.transactionManager();
    Flux<SenderResult<Integer>> flux = receiver.receiveExactlyOnce(transactionManager)
            .concatMap(f -> sendAndCommit(destTopic, f, 15))
            .onErrorResume(e -> transactionManager.abort().then(Mono.error(e)));
    StepVerifier.create(flux.then())
                .expectErrorMessage("Test exception")
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
    verifyTransaction(count, 10);
    assertEquals(2, producer.beginCount);
    assertEquals(1, producer.commitCount);
    assertEquals(1, producer.abortCount);
    assertEquals(1, producer.sendOffsetsCount);
}
@Test
public void autoAck() throws Exception {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux, 0, 100, 0, 100);
    waitForCommits(receiver, 100);

    // Close consumer and create another one. First consumer should commit final offset on close.
    // Second consumer should receive only new messages.
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 100, 100);
}
private void restartAndCheck(KafkaReceiver<Integer, String> receiver,
        int sendStartIndex, int sendCount, int maxRedelivered) throws Exception {
    Thread.sleep(500); // Give a little time for commits to complete before terminating abruptly
    new TestableReceiver(receiver).terminate();
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAtmostOnce();
    sendReceiveWithRedelivery(kafkaFlux2, sendStartIndex, sendCount, 0, maxRedelivered);
    clearReceivedMessages();
    cancelSubscriptions(false);
}
@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(Integer.MAX_VALUE);
    receiverOptions.commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);
    Thread.sleep(1500); // Allow the commit interval to elapse so acknowledged offsets are committed
    restartAndCheck(receiver, 100, 100, 0);
}
@Test
public void consumerClose() throws Exception {
    int count = 10;
    for (int i = 0; i < 2; i++) {
        Collection<ReceiverPartition> seekablePartitions = new ArrayList<>();
        receiverOptions = receiverOptions
                .addAssignListener(partitions -> {
                    seekablePartitions.addAll(partitions);
                    assignSemaphore.release();
                })
                .subscription(Collections.singletonList(topic));
        KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
        Flux<ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);

        Disposable disposable = sendAndWaitForMessages(kafkaFlux, count);
        assertTrue("No partitions assigned", seekablePartitions.size() > 0);
        if (i == 0)
            waitForCommits(receiver, count);
        disposable.dispose();
        try {
            seekablePartitions.iterator().next().seekToBeginning();
            fail("Consumer not closed");
        } catch (IllegalStateException e) {
            // expected exception
        }
    }
}
/**
 * Tests the transactional receive-and-send good path with messages to multiple partitions
 * as well as offset commits included within each transaction.
 */
@Test
public void transactionalReceiveAndSend() throws Exception {
    int count = 600;
    sendMessages(srcTopic, 0, count);
    int transactionCount = count / maxPollRecords;
    Flux<SenderResult<Integer>> flux = receiver.receiveExactlyOnce(sender.transactionManager())
            .concatMap(f -> sendAndCommit(destTopic, f, -1));
    Disposable disposable = flux.subscribe();
    waitForTransactions(transactionCount);
    disposable.dispose();
    verifyTransaction(count, count);
    assertEquals(transactionCount, producer.beginCount);
    assertEquals(transactionCount, producer.commitCount);
    assertEquals(0, producer.abortCount);
    assertEquals(transactionCount, producer.sendOffsetsCount);
}
@Test
public void autoCommitNonRetriableException() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    testAutoCommitFailureScenarios(false, count, 2, 0, 10);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceiveWithRedelivery(flux, count, count, 3, 5);
}