public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions.subscription(Collections.singleton(topic))
            .addAssignListener(partitions -> log.debug("onPartitionsAssigned {}", partitions))
            .addRevokeListener(partitions -> log.debug("onPartitionsRevoked {}", partitions));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(options).receive();
    return kafkaFlux.subscribe(record -> {
        ReceiverOffset offset = record.receiverOffset();
        System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                offset.topicPartition(),
                offset.offset(),
                dateFormat.format(new Date(record.timestamp())),
                record.key(),
                record.value());
        offset.acknowledge();
        latch.countDown();
    });
}
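// Hypothetical wiring for consumeMessages(...) above: a minimal sketch, assuming
// the method lives in a class (called SampleConsumer here for illustration) whose
// receiverOptions field is built from these properties. Broker address, group id,
// and topic name are assumptions, not part of the original snippet.
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
props.put(ConsumerConfig.GROUP_ID_CONFIG, "sample-group");            // assumed group id
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
ReceiverOptions<Integer, String> receiverOptions = ReceiverOptions.create(props);

// Consume until 10 messages have arrived, then stop the pipeline.
CountDownLatch latch = new CountDownLatch(10);
Disposable disposable = new SampleConsumer(receiverOptions).consumeMessages("demo-topic", latch);
latch.await();
disposable.dispose();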
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
            .receive()
            .map(m -> SenderRecord.create(transform(m.value()), m.receiverOffset()))
            .as(sender::send)
            .doOnNext(m -> m.correlationMetadata().acknowledge())
            .doOnCancel(() -> close());
}

public ProducerRecord<Integer, Person> transform(Person p) {
    // Body elided in the original excerpt; a hypothetical implementation would
    // build the outbound record for a destination topic (destTopic and p.id()
    // are assumed here), e.g.:
    return new ProducerRecord<>(destTopic, p.id(), p);
}
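// Running the transform pipeline above (a sketch; 'KafkaTransform' is an assumed
// name for the enclosing class, which the excerpt does not show). Because each
// receiver offset travels through the send as correlation metadata and is
// acknowledged only in doOnNext, i.e. after the send result arrives, the pipeline
// gives at-least-once delivery to the destination topic.
Disposable pipeline = new KafkaTransform().flux().subscribe();
// ... later: cancelling triggers doOnCancel(() -> close()) and releases the receiver
pipeline.dispose();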
@Test
public void publishFromEventScheduler() throws Exception {
    receiverOptions = receiverOptions
            .schedulerSupplier(Schedulers::immediate)
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    AtomicReference<String> publishingThreadName = new AtomicReference<>();
    CountDownLatch receiveLatch = new CountDownLatch(1);
    Disposable disposable = receiver.receive()
            .doOnNext(record -> {
                publishingThreadName.set(Thread.currentThread().getName());
                record.receiverOffset().acknowledge();
                receiveLatch.countDown();
            })
            .subscribe();
    subscribeDisposables.add(disposable);
    waitForPartitionAssignment();
    sendMessages(0, 1);
    waitForMessages(receiveLatch);
    assertNotNull(publishingThreadName.get());
    assertTrue(publishingThreadName.get().startsWith("reactive-kafka-"));
}
/**
 * Tests that acknowledged offsets are committed using the configured batch size.
 */
@Test
public void manualAckCommitBatchSize() {
    topic = topics.get(1);
    int batchSize = 4;
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .commitBatchSize(batchSize);
    AtomicLong lastCommitted = new AtomicLong(-1);
    sendMessages(topic, 0, 20);
    receiveAndVerify(15, r -> {
        long offset = r.receiverOffset().offset();
        if (offset < 10) {
            r.receiverOffset().acknowledge();
            if (((offset + 1) % batchSize) == 0)
                lastCommitted.set(offset);
        } else
            uncommittedMessages.add(r);
        verifyCommit(r, lastCommitted.get());
        return Mono.just(r);
    });
    verifyCommits(groupId, topic, 10);
}
@Test
public void publishFromCustomScheduler() throws Exception {
    String schedulerName = "custom-scheduler";
    Scheduler scheduler = Schedulers.newElastic(schedulerName);
    receiverOptions = receiverOptions
            .schedulerSupplier(() -> scheduler)
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    AtomicReference<String> publishingThreadName = new AtomicReference<>();
    CountDownLatch receiveLatch = new CountDownLatch(1);
    Disposable disposable = receiver.receive()
            .doOnNext(record -> {
                publishingThreadName.set(Thread.currentThread().getName());
                record.receiverOffset().acknowledge();
                receiveLatch.countDown();
            })
            .subscribe();
    subscribeDisposables.add(scheduler);
    subscribeDisposables.add(disposable);
    waitForPartitionAssignment();
    sendMessages(0, 1);
    waitForMessages(receiveLatch);
    assertNotNull(publishingThreadName.get());
    assertTrue(publishingThreadName.get().startsWith(schedulerName));
}
@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(Integer.MAX_VALUE);
    receiverOptions.commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Wait for the periodic commit to fire before restarting
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}
/**
 * Tests that only acknowledged offsets are committed with manual-ack using
 * {@link KafkaReceiver#receive()}.
 */
@Test
public void manualAck() {
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .commitBatchSize(1);
    Map<TopicPartition, Long> acknowledged = new ConcurrentHashMap<>();
    for (TopicPartition partition : cluster.partitions(topic))
        acknowledged.put(partition, -1L);
    sendMessages(topic, 0, 20);
    receiveAndVerify(10, r -> {
        ReceiverOffset offset = r.receiverOffset();
        TopicPartition partition = offset.topicPartition();
        Long committedOffset = cluster.committedOffset(groupId, partition);
        boolean valid = committedOffset == null || acknowledged.get(partition) >= committedOffset - 1;
        if (offset.offset() % 3 == 0) {
            offset.acknowledge();
            acknowledged.put(partition, offset.offset());
        }
        assertTrue("Unexpected commit state", valid);
        return Mono.just(r);
    });
    for (Map.Entry<TopicPartition, Long> entry : acknowledged.entrySet()) {
        Long committedOffset = cluster.committedOffset(groupId, entry.getKey());
        assertEquals(entry.getValue() + 1, committedOffset.longValue());
    }
}
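// Outside the test harness, the same manual-ack pattern in application code might
// look like this sketch (the "orders" topic and process(...) helper are assumed,
// not part of the original tests). acknowledge() only marks the offset as
// consumed; the actual commit happens according to the configured
// commitBatchSize/commitInterval.
KafkaReceiver.create(receiverOptions.subscription(Collections.singleton("orders")))
        .receive()
        .doOnNext(record -> {
            process(record.value());               // hypothetical business logic
            record.receiverOffset().acknowledge(); // eligible for the next commit
        })
        .subscribe();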
/**
 * Tests that acknowledged offsets are committed using the configured commit interval.
 */
@Test
public void manualAckCommitInterval() {
    topic = topics.get(1);
    Duration interval = Duration.ofMillis(500);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .commitInterval(interval);
    AtomicLong lastCommitted = new AtomicLong(-1);
    final int delayIndex = 5;
    sendMessages(topic, 0, 20);
    receiveAndVerify(15, r -> {
        long offset = r.receiverOffset().offset();
        if (offset < 10) {
            r.receiverOffset().acknowledge();
            if (offset == delayIndex) {
                TestUtils.sleep(interval.toMillis());
                lastCommitted.set(offset);
            }
        } else
            uncommittedMessages.add(r);
        verifyCommit(r, lastCommitted.get());
        return Mono.just(r);
    });
    verifyCommits(groupId, topic, 10);
}
@Test
public void atleastOnceCommitRecord() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(1);
    receiverOptions.commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most one record may be redelivered
    restartAndCheck(receiver, 100, 100, 1);
}
@Test
public void manualCommitBatch() throws Exception {
    int count = 20;
    int commitIntervalMessages = 4;
    CountDownLatch commitLatch = new CountDownLatch(count / commitIntervalMessages);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = -1;
    List<ReceiverOffset> uncommitted = new ArrayList<>();
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .concatMap(record -> {
                ReceiverOffset offset = record.receiverOffset();
                offset.acknowledge();
                uncommitted.add(offset);
                if (uncommitted.size() == commitIntervalMessages) {
                    return offset.commit()
                            .doOnSuccess(i -> onCommit(uncommitted, commitLatch, committedOffsets))
                            .doOnError(e -> log.error("Commit exception", e))
                            .then(Mono.just(record));
                }
                return Mono.just(record);
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));

    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
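// The simplest manual-commit variant, per record rather than batched (a sketch,
// not part of the original tests): emit each record downstream only after its
// offset commit has completed, trading throughput for commit-before-proceed
// semantics.
receiver.receive()
        .concatMap(record -> record.receiverOffset().commit().then(Mono.just(record)))
        .subscribe(record -> log.info("processed {}", record.value()));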
@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions.closeTimeout(Duration.ofMillis(1000));
    receiverOptions.commitBatchSize(10);
    receiverOptions.commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most batchSize records may be redelivered
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}
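// The redelivery bounds exercised by the three atleastOnce* tests, as a
// configuration sketch (values illustrative): smaller commit batches or shorter
// intervals shrink the window of acknowledged-but-uncommitted offsets that can be
// redelivered after a restart, at the cost of more commit traffic.
receiverOptions.commitBatchSize(1);                    // per-record: at most 1 redelivery
receiverOptions.commitBatchSize(10);                   // batched: at most 10 redeliveries
receiverOptions.commitInterval(Duration.ofSeconds(1)); // time-based: one interval's worth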
@Test
public void manualCommitFailure() throws Exception {
    int count = 1;
    AtomicBoolean commitSuccess = new AtomicBoolean();
    Semaphore commitErrorSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .doOnNext(record -> {
                ReceiverOffset offset = record.receiverOffset();
                TestableReceiver.setNonExistentPartition(offset);
                offset.acknowledge();
                offset.commit()
                        .doOnError(e -> commitErrorSemaphore.release())
                        .doOnSuccess(i -> commitSuccess.set(true))
                        .subscribe();
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));

    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(1, count);
    assertTrue("Commit error callback not invoked",
            commitErrorSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertFalse("Commit of non-existent topic succeeded", commitSuccess.get());
}
@Test
public void messageProcessorFailure() throws Exception {
    int count = 200;
    int successfulReceives = 100;
    CountDownLatch receiveLatch = new CountDownLatch(successfulReceives + 1);
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .publishOn(Schedulers.single())
            .doOnNext(record -> {
                receiveLatch.countDown();
                if (receiveLatch.getCount() == 0)
                    throw new RuntimeException("Test exception");
                record.receiverOffset().acknowledge();
            });

    CountDownLatch latch = new CountDownLatch(successfulReceives);
    subscribe(kafkaFlux, latch);
    sendMessages(0, count);
    waitForMessages(latch);
    TestUtils.sleep(100);
    assertEquals(successfulReceives, count(receivedMessages));
}
@Test
public void resumeAfterFailure() throws Exception {
    int count = 20;
    CountDownLatch receiveLatch = new CountDownLatch(count + 1);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Consumer<ReceiverRecord<Integer, String>> onNext = record -> {
        receiveLatch.countDown();
        onReceive(record);
        log.info("onNext {}", record.value());
        if (receiveLatch.getCount() == 10)
            throw new RuntimeException("Test exception");
        record.receiverOffset().acknowledge();
    };
    Disposable disposable = receiver.receive()
            .doOnNext(onNext)
            .onErrorResume(e -> receiver.receive().doOnNext(onNext))
            .subscribe();
    subscribeDisposables.add(disposable);
    waitForPartitionAssignment();
    sendMessages(0, count);
    waitForMessages(receiveLatch);
}
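// onErrorResume above survives exactly one failure; a sketch of a more general
// alternative (not from the original test) uses Reactor's retry(), which
// resubscribes to receiver.receive() after every error, so consumption resumes
// from the last committed offsets each time.
receiver.receive()
        .doOnNext(onNext)
        .retry()
        .subscribe();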
/**
 * Tests that all acknowledged offsets are committed during graceful close.
 */
@Test
public void manualAckClose() throws Exception {
    receiverOptions = receiverOptions
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            r.receiverOffset().acknowledge();
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> {
                if (receivedMessages.get(record.partition()).size() < 10)
                    record.receiverOffset().acknowledge();
            });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records but does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
                if (receiveCount >= errorClearIndex)
                    testReceiver.clearCommitError();
                record.receiverOffset().acknowledge();
                onNextSemaphore.release();
            })
/**
 * Tests that all acknowledged records are committed on close.
 */
@Test
public void autoCommitClose() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(100)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            r.receiverOffset().acknowledge();
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}
DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver.receive()
        .doOnNext(r -> r.receiverOffset().acknowledge());
try {
    receiver.receive();
    receiveAndVerify(inboundFlux, 10);
    inboundFlux = receiver.receive()
            .doOnNext(r -> r.receiverOffset().acknowledge());
    sendMessages(topic, 10, 10);
    receiveAndVerify(inboundFlux, 10);