/**
 * Logs receipt of a record and returns its offset so the caller can
 * acknowledge or commit it.
 *
 * @param topicPartition the partition the record was received from
 * @param message        the received record (value carries an {@code id()})
 * @return the record's {@link ReceiverOffset}
 */
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
    // Fix: the log format was missing a space before the thread placeholder
    // ("in thread{}" -> "in thread {}").
    log.info("Processing record {} from partition {} in thread {}",
            message.value().id(), topicPartition, Thread.currentThread().getName());
    return message.receiverOffset();
}
}
/**
 * Records the received value both in the global {@code received} list and in
 * the per-partition list, then delegates to the base implementation for the
 * offset handling.
 */
@Override
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
    Person value = message.value();
    received.add(value);
    // Also track arrival order per source partition.
    partitionMap.get(message.partition()).add(value);
    return super.processRecord(topicPartition, message);
}
/**
 * Subscribes to {@code topic}, printing each received record to stdout,
 * acknowledging its offset, and counting down {@code latch} once per record.
 *
 * @param topic the topic to subscribe to
 * @param latch counted down for every record received
 * @return the active subscription; dispose it to stop consuming
 */
public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> subscriptionOptions =
            receiverOptions.subscription(Collections.singleton(topic))
                           .addAssignListener(assigned -> log.debug("onPartitionsAssigned {}", assigned))
                           .addRevokeListener(revoked -> log.debug("onPartitionsRevoked {}", revoked));
    return KafkaReceiver.create(subscriptionOptions)
            .receive()
            .subscribe(received -> {
                ReceiverOffset recordOffset = received.receiverOffset();
                System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                        recordOffset.topicPartition(),
                        recordOffset.offset(),
                        dateFormat.format(new Date(received.timestamp())),
                        received.key(),
                        received.value());
                // Acknowledge only after the record has been handled.
                recordOffset.acknowledge();
                latch.countDown();
            });
}
/**
 * Builds a receive -> transform -> send pipeline: consumes Person records from
 * {@code sourceTopic}, maps each through {@code transform}, sends the result,
 * and acknowledges the inbound offset only after the corresponding send result
 * is emitted.
 */
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
        .receive()
        // Carry the inbound offset as correlation metadata so it can be
        // acknowledged once the matching send result arrives below.
        .map(m -> SenderRecord.create(transform(m.value()), m.receiverOffset()))
        .as(sender::send)
        .doOnNext(m -> m.correlationMetadata().acknowledge())
        .doOnCancel(() -> close());
}
public ProducerRecord<Integer, Person> transform(Person p) {
/**
 * Starts an asynchronous consumer for {@code topic} under the given consumer
 * group, appending every received Person to {@code received}. The subscription
 * is registered in {@code disposables} for later cleanup.
 *
 * @param groupId         consumer group id applied to the receiver options
 * @param topic           topic to subscribe to
 * @param receiverOptions base options; earliest-offset reset and group id are added here
 * @param received        sink list that collects the received values
 */
private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions, List<Person> received) {
    receiverOptions = receiverOptions
        .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
        .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
        .addAssignListener(partitions -> {
            log.debug("Group {} assigned {}", groupId, partitions);
            partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
        })
        .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    // NOTE(review): offsets are not acknowledged/committed explicitly here —
    // presumably the receiver's default commit behaviour is intended; confirm.
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
        .receive()
        .subscribe(m -> {
            Person p = m.value();
            received.add(p);
            log.debug("Thread {} Received from {}: {} ", Thread.currentThread().getName(), m.topic(), p);
        });
    disposables.add(c);
}
private CommittableSource createTestSource(int count, List<Person> expected) {
/**
 * Consumes Person records, stores each in the database, and commits the
 * record's offset only after the store completes. {@code commitInterval(ZERO)}
 * disables periodic commits, so the explicit {@code commit()} below is the only
 * commit path; {@code retry()} resubscribes on any error, resuming from the
 * last committed offset (at-least-once processing).
 */
public Flux<?> flux() {
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
        .receive()
        // Hand records to a dedicated scheduler so the DB call does not run
        // on the Kafka polling thread.
        .publishOn(scheduler)
        // concatMap keeps records strictly ordered: store, then commit,
        // before the next record is processed.
        .concatMap(m -> storeInDB(m.value())
            .thenEmpty(m.receiverOffset().commit()))
        .retry()
        .doOnCancel(() -> close());
}
public Mono<Void> storeInDB(Person person) {
/**
 * Wraps an inbound record as an outbound record for {@code destTopic},
 * preserving partition, key and value. The timestamp is left null so the
 * producer assigns it, and the inbound offset rides along as correlation
 * metadata.
 */
private SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    ReceiverOffset correlation = record.receiverOffset();
    int sourcePartition = record.partition();
    return SenderRecord.create(destTopic, sourcePartition, null, record.key(), record.value(), correlation);
}
/**
 * Converts an inbound record into an outbound record targeted at
 * {@code destTopic}: same partition, key and value; null timestamp (producer
 * assigns it); inbound offset carried as correlation metadata.
 */
public SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    Integer key = record.key();
    String value = record.value();
    // Explicit type witness kept for clarity of the correlation type.
    return SenderRecord.<Integer, String, ReceiverOffset>create(
            destTopic, record.partition(), null, key, value, record.receiverOffset());
}
/**
 * Verifies that consumption resumes after a mid-stream failure: the subscriber
 * throws once part-way through, and {@code onErrorResume} attaches a fresh
 * {@code receive()} flux so the remaining messages are still delivered.
 */
@Test
public void resumeAfterFailure() throws Exception {
    int count = 20;
    // count + 1: presumably the record that triggered the failure (thrown
    // before its acknowledge) is delivered again after the resume — TODO
    // confirm against the receiver's redelivery semantics.
    CountDownLatch receiveLatch = new CountDownLatch(count + 1);
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
        .addAssignListener(this::onPartitionsAssigned)
        .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Consumer<ReceiverRecord<Integer, String>> onNext = record -> {
        receiveLatch.countDown();
        onReceive(record);
        log.info("onNext {}", record.value());
        // Fail exactly once, half-way through the expected count; note the
        // acknowledge below is skipped for the failing record.
        if (receiveLatch.getCount() == 10)
            throw new RuntimeException("Test exception");
        record.receiverOffset().acknowledge();
    };
    Disposable disposable = receiver.receive()
        .doOnNext(onNext)
        // On error, resume with a brand-new receive() flux from the same receiver.
        .onErrorResume(e -> receiver.receive().doOnNext(onNext))
        .subscribe();
    subscribeDisposables.add(disposable);
    waitFoPartitionAssignment();
    sendMessages(0, count);
    waitForMessages(receiveLatch);
}
    // Tail of an upstream pipeline: move emissions onto the consumer scheduler.
    .publishOn(consumerScheduler);
// Re-publish each consumed record to partition 1 of `topic`, carrying the
// inbound offset as correlation metadata (statement continues past this view).
Disposable disposable0 = kafkaSender
    .send(flux0.map(cr -> SenderRecord.create(topic, 1, null, cr.key(), cr.value(), cr.receiverOffset())))
    .concatMap(sendResult -> sendResult.correlationMetadata()
// Verify the i-th consumed record matches the record that was sent: same
// key and value, and it landed on the expected topic-partition.
ReceiverRecord<Integer, String> receiverRecord = receiverRecords.get(i);
assertEquals(senderRecord.key(), receiverRecord.key());
assertEquals(senderRecord.value(), receiverRecord.value());
assertEquals(topic, receiverRecord.topic());
assertEquals(partition, receiverRecord.partition());