public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
    log.info("Processing record {} from partition {} in thread {}",
        message.value().id(), topicPartition, Thread.currentThread().getName());
    return message.receiverOffset();
}

public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions.subscription(Collections.singleton(topic))
        .addAssignListener(partitions -> log.debug("onPartitionsAssigned {}", partitions))
        .addRevokeListener(partitions -> log.debug("onPartitionsRevoked {}", partitions));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(options).receive();
    return kafkaFlux.subscribe(record -> {
        ReceiverOffset offset = record.receiverOffset();
        System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
            offset.topicPartition(), offset.offset(), dateFormat.format(new Date(record.timestamp())),
            record.key(), record.value());
        offset.acknowledge();
        latch.countDown();
    });
}

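// The receiverOptions field used above is not shown in the source. A minimal sketch of
// how it might be created; the broker address, group id, and deserializer choices below
// are placeholder assumptions, not taken from the source:
Map<String, Object> consumerProps = new HashMap<>();
consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "sample-group");
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
ReceiverOptions<Integer, String> receiverOptions = ReceiverOptions.create(consumerProps);
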
private void verifyCommit(ReceiverRecord<Integer, String> r, long lastCommitted) {
    TopicPartition partition = r.receiverOffset().topicPartition();
    long offset = r.receiverOffset().offset();
    if (lastCommitted >= 0 && offset == lastCommitted) {
        TestUtils.waitUntil("Offset not committed", null,
            p -> cluster.committedOffset(groupId, p) == (Long) (offset + 1),
            partition, Duration.ofSeconds(1));
    }
    Long committedOffset = cluster.committedOffset(groupId, partition);
    assertEquals(committedOffset, lastCommitted == -1 ? null : lastCommitted + 1);
}

public Flux<?> flux() {
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
        .receive()
        .groupBy(m -> m.receiverOffset().topicPartition())
        .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
            .map(r -> processRecord(partitionFlux.key(), r))
            .sample(Duration.ofMillis(5000))
            .concatMap(offset -> offset.commit()))
        .doOnCancel(() -> close());
}

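// Usage sketch (assumed, not part of the source): sample(Duration.ofMillis(5000)) keeps
// only the latest processed offset per partition in each 5-second window, and
// concatMap(offset -> offset.commit()) commits those offsets sequentially, so the
// pipeline commits at most once per partition per interval. Disposing the subscription
// triggers doOnCancel and the close() cleanup.
Disposable pipeline = flux().subscribe();
Runtime.getRuntime().addShutdownHook(new Thread(pipeline::dispose));
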
/**
 * Tests that assign callbacks are invoked before any records are delivered
 * when partitions are assigned using group management.
 */
@Test
public void assignCallback() {
    receiverOptions = receiverOptions.subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, r -> {
        assertTrue("Assign callback not invoked", assignedPartitions.contains(r.receiverOffset().topicPartition()));
        return Mono.just(r);
    });
}

public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
        .receive()
        .map(m -> SenderRecord.create(transform(m.value()), m.receiverOffset()))
        .as(sender::send)
        .doOnNext(m -> m.correlationMetadata().acknowledge())
        .doOnCancel(() -> close());
}

public ProducerRecord<Integer, Person> transform(Person p) {
    // Body not shown in the source; a minimal placeholder that forwards the person
    // unchanged to an assumed destTopic field.
    return new ProducerRecord<>(destTopic, p.id(), p);
}

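// senderOptions() is also not shown in the source; a minimal sketch, where the broker
// address and the PersonSerializer class are illustrative assumptions:
public SenderOptions<Integer, Person> senderOptions() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, PersonSerializer.class); // hypothetical serializer
    return SenderOptions.create(props);
}
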
/**
 * Send and receive using manual assignment of partitions.
 */
@Test
public void manualAssignment() {
    receiverOptions = receiverOptions.assignment(cluster.partitions(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, r -> {
        assertTrue("Assign callback not invoked", assignedPartitions.contains(r.receiverOffset().topicPartition()));
        return Mono.just(r);
    });
}

public Flux<?> flux() {
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
        .receive()
        .publishOn(scheduler)
        .concatMap(m -> storeInDB(m.value())
            .thenEmpty(m.receiverOffset().commit()))
        .retry()
        .doOnCancel(() -> close());
}

public Mono<Void> storeInDB(Person person) {
    // Body not shown in the source; a minimal placeholder that logs instead of
    // writing to a real database.
    return Mono.fromRunnable(() -> log.info("Storing person {}", person.id()));
}

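// The scheduler field is not shown in the source. A plausible choice (an assumption) is
// a dedicated single-threaded scheduler, so the database write and the commit chained
// after it via thenEmpty(...) stay strictly ordered per record; retry() resubscribes
// from the last committed offset after a failure, giving at-least-once delivery into
// the database.
private final Scheduler scheduler = Schedulers.newSingle("db-writer");
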
private SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    return SenderRecord.create(destTopic, record.partition(), null,
        record.key(), record.value(), record.receiverOffset());
}

public SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    return SenderRecord.<Integer, String, ReceiverOffset>create(destTopic, record.partition(), null,
        record.key(), record.value(), record.receiverOffset());
}

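// Hypothetical end-to-end usage of toSenderRecord (receiver/sender setup and topic name
// assumed): the source offset travels through the send as correlation metadata and is
// only acknowledged once the broker confirms the write, giving at-least-once forwarding.
sender.send(KafkaReceiver.create(receiverOptions).receive()
            .map(r -> toSenderRecord("dest-topic", r)))
      .doOnNext(result -> result.correlationMetadata().acknowledge())
      .subscribe();
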
/**
 * Tests that all acknowledged offsets are committed during graceful close.
 */
@Test
public void manualAckClose() throws Exception {
    receiverOptions = receiverOptions.subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            r.receiverOffset().acknowledge();
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}

@Test
public void atleastOnceCommitInterval() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
        .commitBatchSize(Integer.MAX_VALUE)
        .commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);
    // Allow the 1s periodic commit to run before restarting
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}

@Test
public void atleastOnceCommitRecord() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
        .commitBatchSize(1)
        .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);
    // At most one record may be redelivered
    restartAndCheck(receiver, 100, 100, 1);
}

/**
 * Tests manual commits for {@link KafkaReceiver#receive()} with synchronous commits
 * after message processing.
 */
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    receiverOptions = receiverOptions
        .commitBatchSize(0)
        .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
        .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> {
        StepVerifier.create(record.receiverOffset().commit())
            .expectComplete()
            .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
        return Mono.just(record);
    });
    verifyCommits(groupId, topic, 10);
}

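// Outside a test, the same synchronous pattern can be written without StepVerifier
// (a sketch; process(...) is a hypothetical per-record handler): concatMap delays the
// demand for the next record until the commit of the current one completes.
receiver.receive()
        .concatMap(record -> process(record).then(record.receiverOffset().commit()))
        .subscribe();
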
@Test
public void manualAssignmentWithCommit() throws Exception {
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO)
        .commitBatchSize(0)
        .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
        .receive()
        .delayUntil(r -> r.receiverOffset().commit())
        .doOnSubscribe(s -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}

@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
        .commitBatchSize(10)
        .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
        .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);
    // At most commitBatchSize records may be redelivered
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}

@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
        .commitBatchSize(0)
        .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
        .maxCommitAttempts(1)
        .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}

/**
 * Tests that transient commit failures are retried, and that the manual commit Mono
 * completes successfully if the commit succeeds within the configured number of attempts.
 */
@Test
public void manualCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
        .commitBatchSize(0)
        .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
        .maxCommitAttempts(10)
        .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}

@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
        .commitBatchSize(10)
        .commitInterval(Duration.ofMillis(60000))
        .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, but does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}

@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = 0;
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
        .delayUntil(record -> {
            assertEquals(committedOffsets[record.partition()], record.offset());
            return record.receiverOffset().commit()
                .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets));
        })
        .doOnError(e -> log.error("KafkaFlux exception", e));
    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}