/**
 * Builds receiver options subscribed to the given topics, logging partition
 * assignments and revocations for this consumer group.
 */
public ReceiverOptions<Integer, Person> receiverOptions(Collection<String> topics) {
    return receiverOptions()
            .addAssignListener(p -> log.info("Group {} partitions assigned {}", groupId, p))
            .addRevokeListener(p -> log.info("Group {} partitions revoked {}", groupId, p))
            .subscription(topics);
}
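// For context, a minimal sketch of how the options built above might be consumed.
// This is an illustration, not code from this source: the "persons" topic name is
// made up, and the Person value deserializer is assumed to be configured inside
// receiverOptions().
KafkaReceiver<Integer, Person> receiver =
        KafkaReceiver.create(receiverOptions(Collections.singleton("persons")));
receiver.receive()
        .doOnNext(record -> {
            log.info("Received {}", record.value());
            record.receiverOffset().acknowledge();   // mark the offset for commit
        })
        .subscribe();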
ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions = ReceiverOptions.<byte[], byte[]>create(consumerProps)
            .addAssignListener(partitions -> {
                // On first assignment, seek to the end of each partition so that
                // latency is measured only for messages sent after assignment.
                if (assignSemaphore.availablePermits() == 0) {
                    partitions.forEach(p -> p.seekToEnd());
                    assignSemaphore.release();
                }
            })
            .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions)
            .receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}

public void initialize() {
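    // The body of initialize() is elided in this excerpt. A purely illustrative
    // completion (an assumption, not the actual implementation): drain the inbound
    // flux into receiveQueue so the latency loop can poll received records.
    // `subscription` would be a reactor.core.Disposable field, introduced here
    // only for illustration.
    subscription = flux.subscribe(record -> receiveQueue.add(record));
}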
/**
 * Send and receive one message.
 */
@Test
public void receiveOne() {
    receiverOptions = receiverOptions.subscription(Collections.singleton(topic));
    sendReceiveAndVerify(1, 1);
}
/**
 * Send and receive messages from multiple partitions using one receiver.
 */
@Test
public void receiveMultiplePartitions() {
    receiverOptions = receiverOptions.subscription(Collections.singleton(topic));
    sendReceiveAndVerify(10, 10);
}
public KafkaReceiver<Integer, String> createReceiver() {
    receiverOptions = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    return KafkaReceiver.create(receiverOptions);
}
/**
 * Send and receive using wildcard subscription with group management.
 */
@Test
public void wildcardSubscription() {
    receiverOptions = receiverOptions.subscription(Pattern.compile("[a-z]*2"));
    sendReceiveAndVerify(10, 10);
}
/**
 * Tests that an exception thrown by the downstream message processor terminates
 * the inbound flux with an error and closes the underlying consumer.
 */
@Test
public void messageProcessorFailure() throws Exception {
    int count = 10;
    receiverOptions = receiverOptions
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count);
    receiveVerifyError(RuntimeException.class, record -> {
        receivedMessages.add(record);
        if (receivedMessages.size() == 1)
            return Mono.error(new RuntimeException("Failing onNext"));
        return Mono.just(record);
    });
    assertTrue("Consumer not closed", consumer.closed());
}
/**
 * Tests that assign callbacks are invoked before any records are delivered
 * when partitions are assigned using group management.
 */
@Test
public void assignCallback() {
    receiverOptions = receiverOptions.subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, r -> {
        assertTrue("Assign callback not invoked",
                assignedPartitions.contains(r.receiverOffset().topicPartition()));
        return Mono.just(r);
    });
}
@Test
public void backPressureReceiveAtmostOnce() throws Exception {
    // Limit each poll to a single record so back pressure can be exercised precisely.
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> flux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receiveAtmostOnce();
    testBackPressure(flux);
}
@Test
public void backPressureReceiveAutoAck() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> flux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receiveAutoAck();
    testBackPressure(flux);
}
@Test
public void backPressureReceive() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> flux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receive();
    testBackPressure(flux);
}
@Test
public void wildcardSubscribe() throws Exception {
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Pattern.compile("test.*"));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceive(kafkaFlux, 0, 10, 0, 10);
}
/**
 * Tests that a seek failure in the assign listener terminates the inbound flux with an error.
 */
@Test
public void seekFailure() throws Exception {
    sendMessages(topic, 0, 10);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seek(20);   // seek beyond the last written offset
            })
            .subscription(Collections.singleton(topic));
    receiveVerifyError(InvalidOffsetException.class);
}
@Test
public void seekToBeginning() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceive(kafkaFlux, count, count, 0, count * 2);
}
/**
 * Consumes from the first available offset of each partition by seeking to the
 * beginning of all assigned partitions in the assign listener.
 */
@Test
public void seekToBeginning() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToBeginning();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    receiveWithOneOffAction(receiver, 10, 10, () -> sendMessages(topic, 10, 20));
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
}
@Test
public void shouldNotOverflowOnLongMaxValuePlus1WhichHappensInCaseOfSkip1() {
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver.receive()
            .skip(1);
    sendMessages(topic, 0, 3);
    StepVerifier.create(inboundFlux)
            .expectNextCount(2)
            .thenCancel()
            .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
}
@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(1)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
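// The pattern exercised above, as it might appear in application code: commit each
// record's offset explicitly and retry transient commit failures. A sketch only;
// process(record) is a hypothetical per-record processing step returning Mono<Void>.
KafkaReceiver.create(receiverOptions)
        .receive()
        .concatMap(record -> process(record)
                .then(record.receiverOffset().commit().retry(3)))
        .subscribe();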
/**
 * Tests that all acknowledged offsets are committed during graceful close.
 */
@Test
public void manualAckClose() throws Exception {
    receiverOptions = receiverOptions
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            r.receiverOffset().acknowledge();   // acknowledge only offsets 0-4
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    // Restart with a new consumer in the same group: records whose offsets were
    // not committed before close should be redelivered.
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}
/**
 * Tests that transient commit failures are retried with {@link KafkaReceiver#receiveAtmostOnce()}.
 */
@Test
public void atmostOnceCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    receiverOptions = receiverOptions
            .maxCommitAttempts(10)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                    .receiveAtmostOnce();
    verifyMessages(inboundFlux.take(10), 10);
    verifyCommits(groupId, topic, 10);
}
/**
 * Tests the {@link KafkaReceiver#receiveAtmostOnce()} happy path, with no failures.
 */
@Test
public void atmostOnce() {
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 20);
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
                    .receiveAtmostOnce()
                    // at-most-once: each offset must already be committed by the
                    // time the record is delivered downstream
                    .filter(r -> cluster.committedOffset(groupId, topicPartition(r)) >= r.offset());
    verifyMessages(inboundFlux.take(10), 10);
    verifyCommits(groupId, topic, 10);
}