public Flux<?> flux() {
    sender = sender(senderOptions());
    EmitterProcessor<Person> processor = EmitterProcessor.create();
    FluxSink<Person> incoming = processor.sink();
    // Inbound flux: consume the source topic and feed each Person into the processor
    Flux<?> inFlux = KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
                                  .receiveAutoAck()
                                  .concatMap(r -> r)
                                  .doOnNext(m -> incoming.next(m.value()));
    // Fan-out: two independent outbound streams, each published on its own scheduler
    Flux<SenderResult<Integer>> stream1 = sender.send(processor.publishOn(scheduler1)
                                                               .map(p -> SenderRecord.create(process1(p, true), p.id())));
    Flux<SenderResult<Integer>> stream2 = sender.send(processor.publishOn(scheduler2)
                                                               .map(p -> SenderRecord.create(process2(p, true), p.id())));
    AtomicReference<Disposable> cancelRef = new AtomicReference<>();
    Consumer<AtomicReference<Disposable>> cancel = cr -> {
        Disposable c = cr.getAndSet(null);
        if (c != null)
            c.dispose();
    };
    // Start the inbound subscription when the merged outbound stream is subscribed,
    // and dispose it on cancellation or termination
    return Flux.merge(stream1, stream2)
               .doOnSubscribe(s -> cancelRef.set(inFlux.subscribe()))
               .doOnCancel(() -> {
                   cancel.accept(cancelRef);
                   close();
               })
               .doOnTerminate(() -> close());
}

public ProducerRecord<Integer, Person> process1(Person p, boolean debug) {
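// Note: sender(...), senderOptions() and receiverOptions(...) above are helpers defined
// elsewhere in the scenario class and are not part of this excerpt. A minimal sketch of
// what senderOptions() might look like — the bootstrapServers field and PersonSerializer
// class are assumptions for illustration, not taken from the original source:
public SenderOptions<Integer, Person> senderOptions() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); // assumed fixture field
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, PersonSerializer.class); // hypothetical serde
    return SenderOptions.create(props);
}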
@Test
public void consumerClose() throws Exception {
    int count = 10;
    for (int i = 0; i < 2; i++) {
        Collection<ReceiverPartition> seekablePartitions = new ArrayList<>();
        receiverOptions = receiverOptions
                .addAssignListener(partitions -> {
                    seekablePartitions.addAll(partitions);
                    assignSemaphore.release();
                })
                .subscription(Collections.singletonList(topic));
        KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
        Flux<ConsumerRecord<Integer, String>> kafkaFlux = receiver
                .receiveAutoAck()
                .concatMap(r -> r);
        Disposable disposable = sendAndWaitForMessages(kafkaFlux, count);
        assertTrue("No partitions assigned", seekablePartitions.size() > 0);
        if (i == 0)
            waitForCommits(receiver, count);
        disposable.dispose();
        try {
            seekablePartitions.iterator().next().seekToBeginning();
            fail("Consumer not closed");
        } catch (IllegalStateException e) {
            // expected exception
        }
    }
}
@Test
public void autoCommitRetry() throws Exception {
    int count = 5;
    testAutoCommitFailureScenarios(true, count, 10, 0, 2);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(flux, count, count, count, count);
}
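// createReceiver() is a test-fixture helper that is not shown in this excerpt. A plausible
// minimal sketch, assuming the receiverOptions and topic fields used throughout these tests:
private KafkaReceiver<Integer, String> createReceiver() {
    return KafkaReceiver.create(receiverOptions.subscription(Collections.singletonList(topic)));
}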
@Test
public void autoAck() throws Exception {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux, 0, 100, 0, 100);
    waitForCommits(receiver, 100);

    // Close consumer and create another one. First consumer should commit final offset on close.
    // Second consumer should receive only new messages.
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 100, 100);
}
@Test
public void autoCommitNonRetriableException() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    testAutoCommitFailureScenarios(false, count, 2, 0, 10);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceiveWithRedelivery(flux, count, count, 3, 5);
}
@Test
public void autoAckPollWithIntervalWillNotFailOnOverflow() throws Exception {
    ReceiverOptions<Integer, String> options = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .commitInterval(Duration.ofMillis(10))
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(options);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    CountDownLatch latch = new CountDownLatch(100);
    subscribe(kafkaFlux, latch);
    // Bounce the broker: with a 10 ms commit interval, scheduled commit attempts
    // accumulate while the broker is down, which must not overflow the receiver
    embeddedKafka.shutdownBroker(brokerId);
    Thread.sleep(3000);
    embeddedKafka.startBroker(brokerId);
    sendMessagesSync(0, 100);
    waitForMessages(latch);
    checkConsumedMessages(0, 100);
    waitForCommits(receiver, 100);
}
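// onPartitionsAssigned is a fixture callback referenced via this::onPartitionsAssigned and
// not shown here. A minimal sketch consistent with how the other tests release
// assignSemaphore on assignment — the body is an assumption, not the original helper:
private void onPartitionsAssigned(Collection<ReceiverPartition> partitions) {
    assertTrue("No partitions assigned", partitions.size() > 0); // assumed sanity check
    assignSemaphore.release();
}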
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    // Acknowledge only the first 10 records received on each partition
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
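// acknowledge() only marks a record as processed; the actual offset commit is deferred to
// commitBatchSize/commitInterval or to consumer close, which is what this test verifies.
// If explicit per-record commits were wanted instead, ReceiverOffset.commit() could be
// chained in — a sketch under that assumption, not part of the original test:
Flux<ReceiverRecord<Integer, String>> fluxWithCommit = receiver.receive()
        .delayUntil(record -> record.receiverOffset().commit());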