public ReceiverOptions<Integer, Person> receiverOptions(Collection<String> topics) {
    return receiverOptions()
            .addAssignListener(p -> log.info("Group {} partitions assigned {}", groupId, p))
            .addRevokeListener(p -> log.info("Group {} partitions revoked {}", groupId, p))
            .subscription(topics);
}
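// A minimal usage sketch for the options factory above (assumptions: a
// "persons" topic name and the same log field in scope as in the snippet).
// It wires the options into a KafkaReceiver and acknowledges each record.
Disposable subscription = KafkaReceiver.create(receiverOptions(Collections.singleton("persons")))
        .receive()
        .subscribe(record -> {
            log.info("Received {}", record.value());
            record.receiverOffset().acknowledge();
        });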
ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions = ReceiverOptions.<byte[], byte[]>create(consumerProps)
            .addAssignListener(partitions -> {
                if (assignSemaphore.availablePermits() == 0) {
                    partitions.forEach(p -> p.seekToEnd());
                    assignSemaphore.release();
                }
            })
            .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions)
            .receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}
public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions.subscription(Collections.singleton(topic))
            .addAssignListener(partitions -> log.debug("onPartitionsAssigned {}", partitions))
            .addRevokeListener(partitions -> log.debug("onPartitionsRevoked {}", partitions));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(options).receive();
    return kafkaFlux.subscribe(record -> {
        ReceiverOffset offset = record.receiverOffset();
        System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                offset.topicPartition(),
                offset.offset(),
                dateFormat.format(new Date(record.timestamp())),
                record.key(),
                record.value());
        offset.acknowledge();
        latch.countDown();
    });
}
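// Hypothetical driver for the sample consumer above: wait for twenty messages
// from a demo topic, then dispose the subscription. "sampleConsumer" and
// "demo-topic" are illustrative names, and the caller must handle the
// InterruptedException that await can throw.
CountDownLatch latch = new CountDownLatch(20);
Disposable disposable = sampleConsumer.consumeMessages("demo-topic", latch);
latch.await(10, TimeUnit.SECONDS);
disposable.dispose();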
public KafkaReceiver<Integer, String> createReceiver() {
    receiverOptions = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    return KafkaReceiver.create(receiverOptions);
}
private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions,
        List<Person> received) {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .addAssignListener(partitions -> {
                log.debug("Group {} assigned {}", groupId, partitions);
                partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
            })
            .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
            .receive()
            .subscribe(m -> {
                Person p = m.value();
                received.add(p);
                log.debug("Thread {} Received from {}: {} ", Thread.currentThread().getName(), m.topic(), p);
            });
    disposables.add(c);
}
@Test
public void wildcardSubscribe() throws Exception {
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Pattern.compile("test.*"));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceive(kafkaFlux, 0, 10, 0, 10);
}
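// Sketch (assumption): a pattern subscription only discovers topics created
// after subscription on the consumer's next metadata refresh, so tests that
// create matching topics on the fly often shorten the refresh interval with
// the standard consumer property below.
receiverOptions = receiverOptions
        .consumerProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000");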
/**
 * Tests that failure in seek in the assign listener terminates the inbound flux with an error.
 */
@Test
public void seekFailure() throws Exception {
    sendMessages(topic, 0, 10);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seek(20);
            })
            .subscription(Collections.singleton(topic));
    receiveVerifyError(InvalidOffsetException.class);
}
@Test
public void publishFromEventScheduler() throws Exception {
    receiverOptions = receiverOptions
            .schedulerSupplier(Schedulers::immediate)
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    AtomicReference<String> publishingThreadName = new AtomicReference<>();
    CountDownLatch receiveLatch = new CountDownLatch(1);
    Disposable disposable = receiver.receive()
            .doOnNext(record -> {
                publishingThreadName.set(Thread.currentThread().getName());
                record.receiverOffset().acknowledge();
                receiveLatch.countDown();
            })
            .subscribe();
    subscribeDisposables.add(disposable);
    waitFoPartitionAssignment();
    sendMessages(0, 1);
    waitForMessages(receiveLatch);
    assertNotNull(publishingThreadName.get());
    assertTrue(publishingThreadName.get().startsWith("reactive-kafka-"));
}
@Before
public void setUp() {
    topics = new ConcurrentHashMap<>();
    for (int i : Arrays.asList(1, 2, 20, 200))
        topics.put(i, "topic" + i);
    topic = topics.get(2);
    cluster = new MockCluster(2, topics);
    receiverOptions = ReceiverOptions.<Integer, String>create()
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.add(p.topicPartition());
            })
            .addRevokeListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.remove(p.topicPartition());
            });
    consumer = new MockConsumer(cluster);
    consumerFactory = new MockConsumer.Pool(Arrays.asList(consumer));
    for (TopicPartition partition : cluster.partitions())
        receiveStartOffsets.put(partition, 0L);
}
@Test
public void seekToBeginning() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceive(kafkaFlux, count, count, 0, count * 2);
}
@Test
public void publishFromCustomScheduler() throws Exception {
    String schedulerName = "custom-scheduler";
    Scheduler scheduler = Schedulers.newElastic(schedulerName);
    receiverOptions = receiverOptions
            .schedulerSupplier(() -> scheduler)
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    AtomicReference<String> publishingThreadName = new AtomicReference<>();
    CountDownLatch receiveLatch = new CountDownLatch(1);
    Disposable disposable = receiver.receive()
            .doOnNext(record -> {
                publishingThreadName.set(Thread.currentThread().getName());
                record.receiverOffset().acknowledge();
                receiveLatch.countDown();
            })
            .subscribe();
    subscribeDisposables.add(scheduler);
    subscribeDisposables.add(disposable);
    waitFoPartitionAssignment();
    sendMessages(0, 1);
    waitForMessages(receiveLatch);
    assertNotNull(publishingThreadName.get());
    assertTrue(publishingThreadName.get().startsWith(schedulerName));
}
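// Sketch (assumption): Schedulers.newElastic is deprecated in recent Reactor
// releases; a bounded elastic scheduler is the usual substitute, and its
// worker threads keep the schedulerName prefix the assertion above relies on.
Scheduler scheduler = Schedulers.newBoundedElastic(4, 100_000, "custom-scheduler");
receiverOptions = receiverOptions.schedulerSupplier(() -> scheduler);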
/**
 * Consume from latest offsets of partitions by seeking to end of all partitions in the assign listener.
 */
@Test
public void seekToEnd() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToEnd();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    for (TopicPartition partition : cluster.partitions(topic))
        receiveStartOffsets.put(partition, (long) cluster.log(partition).size());
    CountDownLatch latch = new CountDownLatch(10);
    Disposable disposable = asyncReceive(latch);
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    sendMessages(topic, 10, 20);
    assertTrue("Messages not received", latch.await(1, TimeUnit.SECONDS));
    verifyMessages(10);
    disposable.dispose();
}
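// The same start-from-latest pattern outside the test harness, as a sketch
// (consumerProps and the topic name are assumptions defined elsewhere):
ReceiverOptions<String, String> fromLatest = ReceiverOptions.<String, String>create(consumerProps)
        .addAssignListener(partitions -> partitions.forEach(ReceiverPartition::seekToEnd))
        .subscription(Collections.singleton("my-topic"));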
/**
 * Consume from first available offset of partitions by seeking to start of all partitions in the assign listener.
 */
@Test
public void seekToBeginning() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToBeginning();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    receiveWithOneOffAction(receiver, 10, 10, () -> sendMessages(topic, 10, 20));
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
}
@Test
public void seekToEnd() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition partition : partitions)
                    partition.seekToEnd();
                onPartitionsAssigned(partitions);
            })
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(100), count, count);
}
@Test
public void seekToOffset() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                onPartitionsAssigned(partitions);
                for (ReceiverPartition partition : partitions)
                    partition.seek(1);
            })
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnError(e -> log.error("KafkaFlux exception", e));
    sendReceive(kafkaFlux, count, count, partitions, count * 2 - partitions);
}
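// Sketch of the common production variant of this seek: resume each assigned
// partition from an offset held in an external store ("offsetFor" is a
// hypothetical lookup, not part of the reactor-kafka API).
receiverOptions = receiverOptions.addAssignListener(partitions -> {
    for (ReceiverPartition p : partitions)
        p.seek(offsetFor(p.topicPartition()));
});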
@Test
public void autoAckPollWithIntervalWillNotFailOnOverflow() throws Exception {
    ReceiverOptions<Integer, String> options = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .commitInterval(Duration.ofMillis(10))
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(options);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    CountDownLatch latch = new CountDownLatch(100);
    subscribe(kafkaFlux, latch);
    embeddedKafka.shutdownBroker(brokerId);
    Thread.sleep(3000);
    embeddedKafka.startBroker(brokerId);
    sendMessagesSync(0, 100);
    waitForMessages(latch);
    checkConsumedMessages(0, 100);
    waitForCommits(receiver, 100);
}
@Test
public void resumeAfterFailure() throws Exception {
    int count = 20;
    CountDownLatch receiveLatch = new CountDownLatch(count + 1);
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Consumer<ReceiverRecord<Integer, String>> onNext = record -> {
        receiveLatch.countDown();
        onReceive(record);
        log.info("onNext {}", record.value());
        if (receiveLatch.getCount() == 10)
            throw new RuntimeException("Test exception");
        record.receiverOffset().acknowledge();
    };
    Disposable disposable = receiver.receive()
            .doOnNext(onNext)
            .onErrorResume(e -> receiver.receive().doOnNext(onNext))
            .subscribe();
    subscribeDisposables.add(disposable);
    waitFoPartitionAssignment();
    sendMessages(0, count);
    waitForMessages(receiveLatch);
}
@Test
public void messageProcessorFailure() throws Exception {
    int count = 200;
    int successfulReceives = 100;
    CountDownLatch receiveLatch = new CountDownLatch(successfulReceives + 1);
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .publishOn(Schedulers.single())
            .doOnNext(record -> {
                receiveLatch.countDown();
                if (receiveLatch.getCount() == 0)
                    throw new RuntimeException("Test exception");
                record.receiverOffset().acknowledge();
            });
    CountDownLatch latch = new CountDownLatch(successfulReceives);
    subscribe(kafkaFlux, latch);
    sendMessages(0, count);
    waitForMessages(latch);
    TestUtils.sleep(100);
    assertEquals(successfulReceives, count(receivedMessages));
}
@Test
public void offsetResetLatest() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> assignSemaphore.release());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
            .receive()
            .doOnNext(record -> onReceive(record));
    StepVerifier.create(kafkaFlux)
            .then(() -> assignSemaphore.acquireUninterruptibly())
            .expectNoEvent(Duration.ofMillis(100))
            .then(() -> sendMessages(count, count))
            .expectNextCount(count)
            .thenCancel()
            .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkConsumedMessages(count, count);
}
@Test
public void manualCommitRecordAsync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    receiverOptions = receiverOptions
            .commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnNext(record -> record.receiverOffset()
                    .commit()
                    .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                    .doOnError(e -> log.error("Commit exception", e))
                    .subscribe());
    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(0, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
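// Alternative sketch: commit as a step in the pipeline rather than a
// fire-and-forget subscribe() inside doOnNext, so commit errors propagate to
// the main subscriber ("process" is a hypothetical Mono-returning handler).
Disposable disposable = KafkaReceiver.create(receiverOptions)
        .receive()
        .concatMap(record -> process(record)
                .then(record.receiverOffset().commit()))
        .subscribe();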