private SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    return SenderRecord.create(destTopic, record.partition(), null, record.key(), record.value(), record.receiverOffset());
}
@Override
public Flux<ReceiverRecord<K, V>> receive() {
    this.ackMode = AckMode.MANUAL_ACK;
    Flux<ConsumerRecord<K, V>> flux = createConsumerFlux()
            .concatMap(Flux::fromIterable, Integer.MAX_VALUE);
    return withDoOnRequest(flux)
            .map(r -> {
                TopicPartition topicPartition = new TopicPartition(r.topic(), r.partition());
                CommittableOffset committableOffset = new CommittableOffset(topicPartition, r.offset());
                return new ReceiverRecord<>(r, committableOffset);
            });
}
public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions.subscription(Collections.singleton(topic))
            .addAssignListener(partitions -> log.debug("onPartitionsAssigned {}", partitions))
            .addRevokeListener(partitions -> log.debug("onPartitionsRevoked {}", partitions));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(options).receive();
    return kafkaFlux.subscribe(record -> {
        ReceiverOffset offset = record.receiverOffset();
        System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                offset.topicPartition(),
                offset.offset(),
                dateFormat.format(new Date(record.timestamp())),
                record.key(),
                record.value());
        offset.acknowledge();
        latch.countDown();
    });
}
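A minimal caller sketch for consumeMessages above; the topic name, message count, and timeout are illustrative, and java.util.concurrent imports (CountDownLatch, TimeUnit) are assumed:

CountDownLatch latch = new CountDownLatch(10);                 // one count per expected message
Disposable disposable = consumeMessages("demo-topic", latch);  // hypothetical topic name
if (!latch.await(30, TimeUnit.SECONDS))                        // wait for all messages to arrive
    log.warn("Timed out waiting for messages");
disposable.dispose();                                          // cancel the flux and close the consumer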
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
    log.info("Processing record {} from partition {} in thread {}",
            message.value().id(), topicPartition, Thread.currentThread().getName());
    return message.receiverOffset();
}
}
SenderRecord<Integer, String, Integer> senderRecord = senderRecords.get(i);
ReceiverRecord<Integer, String> receiverRecord = receiverRecords.get(i);
assertEquals(senderRecord.key(), receiverRecord.key());
assertEquals(senderRecord.value(), receiverRecord.value());
assertEquals(topic, receiverRecord.topic());
assertEquals(partition, receiverRecord.partition());
assertEquals(senderRecord.timestamp().longValue(), receiverRecord.timestamp());
assertEquals(2, receiverRecord.headers().toArray().length);
assertEquals(senderRecord.headers(), receiverRecord.headers());
private void verifyCommit(ReceiverRecord<Integer, String> r, long lastCommitted) {
    TopicPartition partition = r.receiverOffset().topicPartition();
    long offset = r.receiverOffset().offset();
    if (lastCommitted >= 0 && offset == lastCommitted) {
        // compare boxed Long values with equals(), not ==, which only checks references
        TestUtils.waitUntil("Offset not committed", null,
                p -> Long.valueOf(offset + 1).equals(cluster.committedOffset(groupId, p)),
                partition, Duration.ofSeconds(1));
    }
    Long committedOffset = cluster.committedOffset(groupId, partition);
    assertEquals(lastCommitted == -1 ? null : (Long) (lastCommitted + 1), committedOffset);
}
kafkaFlux.groupBy(m -> m.receiverOffset().topicPartition())
        .subscribe(partitionFlux -> subscribeDisposables.add(partitionFlux.publishOn(scheduler).subscribe(record -> {
            int partition = record.partition();
            String current = Thread.currentThread().getName() + ":" + record.offset();
            String inProgress = inProgressMap.putIfAbsent(partition, current);
            if (inProgress != null) {
                // another record from this partition is still in flight on a different thread,
                // i.e. per-partition ordering was violated; the original failure handling is
                // not shown in this excerpt, so logging stands in for it
                log.error("Concurrent processing on partition {}: current={} inProgress={}", partition, current, inProgress);
            }
            onReceive(record);
            latch.countDown();
            record.receiverOffset().acknowledge();
            inProgressMap.remove(partition);
        })));
        .publishOn(consumerScheduler);   // end of a previous chain, truncated in the source
Disposable disposable0 = kafkaSender
        .send(flux0.map(cr -> SenderRecord.create(topic, 1, null, cr.key(), cr.value(), cr.receiverOffset())))
        // the rest of the chain is truncated in the source; committing the correlated
        // receiver offset after each successful send is the assumed continuation
        .concatMap(sendResult -> sendResult.correlationMetadata().commit())
        .subscribe();
Disposable disposable = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
        .receive()
        .groupBy(m -> m.receiverOffset().topicPartition())
        .subscribe(partitionFlux -> groupDisposables.add(partitionFlux.publishOn(scheduler, 1).subscribe(record -> {
            int partition = record.partition();
            String thread = Thread.currentThread().getName();
            Set<Integer> partitionSet = threadMap.get(thread);
            // the remainder of the handler is truncated in the source; recording which
            // partitions each thread observes is the assumed continuation
            if (partitionSet == null) {
                partitionSet = new HashSet<>();
                threadMap.put(thread, partitionSet);
            }
            partitionSet.add(partition);
        })));
private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions, List<Person> received) {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .addAssignListener(partitions -> {
                log.debug("Group {} assigned {}", groupId, partitions);
                partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
            })
            .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
            .receive()
            .subscribe(m -> {
                Person p = m.value();
                received.add(p);
                log.debug("Thread {} received from {}: {}", Thread.currentThread().getName(), m.topic(), p);
            });
    disposables.add(c);
}

private CommittableSource createTestSource(int count, List<Person> expected) {
@Test
public void transactionBeginAbort() throws Exception {
    int count = 30;
    sendMessages(srcTopic, 0, count);
    TransactionManager transactionManager = sender.transactionManager();
    Flux<?> receiveAndSend = receiver.receive()
            .publishOn(transactionManager.scheduler())
            .take(count)
            .window(20)
            .concatMapDelayError(f -> {
                Transaction t = new Transaction(transactionManager, groupId);
                return sender.send(f.map(r -> {
                    if (r.key() == 25)
                        throw new RuntimeException("Test exception");
                    else
                        return toSenderRecord(destTopic, r, t);
                })).then(t.commitAndBegin());
            }, false, 1)
            .onErrorResume(e -> transactionManager.abort().then(Mono.error(e)));
    StepVerifier.create(sender.transactionManager().begin().thenMany(receiveAndSend))
            .expectErrorMessage("Test exception")
            .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
    verifyTransaction(count, 20);
    assertEquals(2, producer.beginCount);
    assertEquals(1, producer.commitCount);
    assertEquals(1, producer.abortCount);
    assertEquals(1, producer.sendOffsetsCount);
}
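The three-argument toSenderRecord referenced in the test above is not part of this excerpt. A plausible sketch, assuming the Transaction travels as correlation metadata, mirroring the two-argument variants elsewhere in this section (the overload's shape is an assumption):

// hypothetical reconstruction of the helper used by transactionBeginAbort
private SenderRecord<Integer, String, Transaction> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> r, Transaction t) {
    return SenderRecord.create(destTopic, r.partition(), null, r.key(), r.value(), t);
}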
public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    return KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
            .receive()
            .map(m -> SenderRecord.create(transform(m.value()), m.receiverOffset()))
            .as(sender::send)
            .doOnNext(m -> m.correlationMetadata().acknowledge())
            .doOnCancel(() -> close());
}

public ProducerRecord<Integer, Person> transform(Person p) {
public Flux<?> flux() {
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
            .receive()
            .groupBy(m -> m.receiverOffset().topicPartition())
            .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
                    .map(r -> processRecord(partitionFlux.key(), r))
                    .sample(Duration.ofMillis(5000))
                    .concatMap(offset -> offset.commit()))
            .doOnCancel(() -> close());
}

public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = 0;
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .delayUntil(record -> {
                assertEquals(committedOffsets[record.partition()], record.offset());
                return record.receiverOffset().commit()
                        .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets));
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));
    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
Disposable disposable = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
        .receive()
        .groupBy(m -> m.receiverOffset().topicPartition().partition())
        .subscribe(partitionFlux -> groupDisposables.add(partitionFlux.take(countPerPartition).publishOn(scheduler, 1).subscribe(record -> {
            String thread = Thread.currentThread().getName();
            int partition = record.partition();
            Set<Integer> partitionSet = threadMap.get(thread);
            if (partitionSet == null) {
                // truncated in the source; registering a fresh set for this
                // thread is the assumed continuation
                partitionSet = new HashSet<>();
                threadMap.put(thread, partitionSet);
            }
            partitionSet.add(partition);
        })));
public SenderRecord<Integer, String, ReceiverOffset> toSenderRecord(String destTopic, ReceiverRecord<Integer, String> record) {
    return SenderRecord.<Integer, String, ReceiverOffset>create(destTopic, record.partition(), null, record.key(), record.value(), record.receiverOffset());
}
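A minimal receive-transform-send sketch built on toSenderRecord above, assuming receiver, sender, and destTopic are in scope; the ReceiverOffset rides through the send as correlation metadata and is committed only after the broker acknowledges each record, giving at-least-once delivery:

receiver.receive()
        .map(record -> toSenderRecord(destTopic, record))             // offset becomes correlation metadata
        .as(sender::send)                                             // Flux<SenderResult<ReceiverOffset>>
        .concatMap(result -> result.correlationMetadata().commit())   // commit only after the send is acknowledged
        .subscribe();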
public Flux<?> flux() {
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
            .receive()
            .publishOn(scheduler)
            .concatMap(m -> storeInDB(m.value())
                    .thenEmpty(m.receiverOffset().commit()))
            .retry()
            .doOnCancel(() -> close());
}

public Mono<Void> storeInDB(Person person) {