/**
 * Builds a reactive Kafka receiver from the given options.
 *
 * @param options configuration for the new receiver; the receiver takes a snapshot,
 *        so later mutations of this instance have no effect on it. A subscription
 *        (group management) or a manual topic-partition assignment must already be
 *        set on the options before calling this method.
 * @return a freshly created receiver
 */
public static <K, V> KafkaReceiver<K, V> create(ReceiverOptions<K, V> options) {
    // Delegate to the default implementation, backed by the shared consumer factory.
    KafkaReceiver<K, V> receiver = new DefaultKafkaReceiver<>(ConsumerFactory.INSTANCE, options);
    return receiver;
}
/**
 * Creates a new event scheduler for the given consumer group id.
 *
 * @param groupId the consumer group id the scheduler is associated with
 * @return a new {@link EventScheduler}
 */
static EventScheduler newEvent(String groupId) {
    EventScheduler scheduler = new EventScheduler(groupId);
    return scheduler;
}
// Shuts the receiver down by delegating to dispose(boolean) with an unconditional
// 'true' flag. NOTE(review): based on dispose()'s own computation of this flag, 'true'
// presumably means "the emitter path is usable / not on the scheduler thread" —
// confirm the exact semantics against dispose(boolean).
void close() { dispose(true); }
/**
 * Switches this receiver into AUTO_ACK mode and exposes the inbound stream as a
 * flux of fluxes: one inner flux per {@code ConsumerRecords} batch returned by a poll.
 * <p>
 * When an inner flux terminates, every record of that batch is acknowledged by
 * creating a {@code CommittableOffset} for it and invoking {@code acknowledge()}.
 * Note that {@code doAfterTerminate} fires after completion and after an error alike,
 * so the whole batch is acknowledged even if downstream processing failed mid-batch.
 */
@Override public Flux<Flux<ConsumerRecord<K, V>>> receiveAutoAck() {
    this.ackMode = AckMode.AUTO_ACK;
    // Demand-aware wrapper: ensures polls are scheduled when downstream requests arrive.
    Flux<ConsumerRecords<K, V>> flux = withDoOnRequest(createConsumerFlux());
    return flux
        .map(consumerRecords -> Flux.fromIterable(consumerRecords)
            .doAfterTerminate(() -> {
                for (ConsumerRecord<K, V> r : consumerRecords)
                    new CommittableOffset(r).acknowledge();
            }));
}
/**
 * Builds a receiver over the test consumer factory/options and verifies that
 * {@code receiveCount} records arrive on its inbound flux.
 */
private void receiveAndVerify(int receiveCount) {
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    // Hand the raw inbound flux straight to the common verification helper.
    receiveAndVerify(receiver.receive(), receiveCount);
}
/**
 * Disposes the receiver, computing the boolean disposal-mode flag from the current
 * thread and the state of the event emitter.
 */
private void dispose() {
    // True when invoked from the event scheduler's own thread.
    boolean isEventsThread = eventScheduler.isCurrentThreadFromScheduler();
    // The emitter can still accept events only if the event submission has not been
    // cancelled and the emitter itself is neither terminated nor cancelled.
    boolean isEventsEmitterAvailable = !(eventSubmission.isCancelled()
            || eventEmitter.isTerminated() || eventEmitter.isCancelled());
    // NOTE(review): flag is true only when off the scheduler thread AND the emitter is
    // usable — presumably selecting disposal via the event pipeline; confirm against
    // dispose(boolean).
    this.dispose(!isEventsThread && isEventsEmitterAvailable);
}
/** Verifies back-pressure handling on the at-most-once receive path. */
@Test
public void backPressureReceiveAtmostOnce() throws Exception {
    // Cap each poll at a single record so downstream demand is exercised one record at a time.
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    testBackPressure(receiver.receiveAtmostOnce());
}
/** Verifies back-pressure handling on the auto-ack receive path. */
@Test
public void backPressureReceiveAutoAck() throws Exception {
    // One record per poll so each downstream request maps to at most one record.
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    testBackPressure(receiver.receiveAutoAck());
}
/**
 * Defers the given function into a {@code CustomEvent} and returns a Mono that is
 * completed with the function's result once the event is processed (presumably on
 * the receiver's event pipeline — confirm against CustomEvent).
 */
@Override
public <T> Mono<T> doOnConsumer(
        Function<org.apache.kafka.clients.consumer.Consumer<K, V>, ? extends T> function) {
    return Mono.create(sink -> emit(new CustomEvent<>(function, sink)));
}
/**
 * Decorates the consumer flux with a demand hook: each downstream request is added
 * to the pending-request counter, and a poll is scheduled whenever the outstanding
 * demand is positive.
 */
private <T> Flux<T> withDoOnRequest(Flux<T> consumerFlux) {
    return consumerFlux.doOnRequest(requested -> {
        if (OperatorUtils.safeAddAndGet(requestsPending, requested) <= 0) {
            return; // no positive outstanding demand — nothing to poll for
        }
        pollEvent.scheduleIfRequired();
    });
}
/** Sends {@code sendCount} messages to the test topic, then verifies {@code receiveCount} are received. */
private void sendReceiveAndVerify(int sendCount, int receiveCount) {
    final int startIndex = 0; // NOTE(review): assumed to be the first message index — confirm sendMessages signature
    sendMessages(topic, startIndex, sendCount);
    receiveAndVerify(receiveCount);
}
/**
 * Sets AUTO_ACK mode and returns the inbound stream grouped per poll: each inner
 * flux replays the records of one {@code ConsumerRecords} batch.
 * <p>
 * After an inner flux terminates, each record in its batch is acknowledged via a
 * new {@code CommittableOffset}. Because {@code doAfterTerminate} runs on both
 * successful completion and error, the batch is acknowledged in either case.
 */
@Override public Flux<Flux<ConsumerRecord<K, V>>> receiveAutoAck() {
    this.ackMode = AckMode.AUTO_ACK;
    // Wrap with the request hook so downstream demand triggers poll scheduling.
    Flux<ConsumerRecords<K, V>> flux = withDoOnRequest(createConsumerFlux());
    return flux
        .map(consumerRecords -> Flux.fromIterable(consumerRecords)
            .doAfterTerminate(() -> {
                for (ConsumerRecord<K, V> r : consumerRecords)
                    new CommittableOffset(r).acknowledge();
            }));
}
/**
 * Builds a receiver and verifies {@code receiveCount} records, applying
 * {@code onNext} to every record before verification.
 * <p>
 * {@code concatMap(..., 1)} uses a prefetch of 1, so records are processed strictly
 * one at a time and in order; {@code publishOn(receiver.scheduler)} moves the
 * downstream signals onto the receiver's scheduler.
 */
private void receiveAndVerify(int receiveCount,
        Function<ReceiverRecord<Integer, String>, Mono<ReceiverRecord<Integer, String>>> onNext) {
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver
            .receive()
            .concatMap(r -> onNext.apply(r)
                    .publishOn(receiver.scheduler), 1);
    receiveAndVerify(inboundFlux, receiveCount);
}
/**
 * Disposes the receiver; the boolean passed to dispose(boolean) is derived from the
 * calling thread and the emitter's lifecycle state.
 */
private void dispose() {
    // Whether we are currently running on the event scheduler's thread.
    boolean isEventsThread = eventScheduler.isCurrentThreadFromScheduler();
    // Emitter is available iff the submission is not cancelled and the emitter is
    // neither terminated nor cancelled.
    boolean isEventsEmitterAvailable = !(eventSubmission.isCancelled()
            || eventEmitter.isTerminated() || eventEmitter.isCancelled());
    // NOTE(review): true only when off the scheduler thread and the emitter is still
    // usable — exact meaning of the flag should be confirmed in dispose(boolean).
    this.dispose(!isEventsThread && isEventsEmitterAvailable);
}
/**
 * Wraps the supplied consumer function in a {@code CustomEvent} and emits it; the
 * returned Mono completes with the function's result when the event is handled.
 */
@Override
public <T> Mono<T> doOnConsumer(
        Function<org.apache.kafka.clients.consumer.Consumer<K, V>, ? extends T> function) {
    return Mono.create(monoSink -> {
        // Package the function and its completion sink together for the event pipeline.
        emit(new CustomEvent<>(function, monoSink));
    });
}
/**
 * Creates a reactive Kafka receiver configured by {@code options}.
 *
 * @param options receiver configuration; modifications made after this call are not
 *        seen by the receiver. A group-management subscription or manual partition
 *        assignment must be set on the options before creating the receiver.
 * @return the new receiver
 */
public static <K, V> KafkaReceiver<K, V> create(ReceiverOptions<K, V> options) {
    // The default implementation is constructed against the singleton consumer factory.
    return new DefaultKafkaReceiver<K, V>(ConsumerFactory.INSTANCE, options);
}
/**
 * Factory for an {@link EventScheduler} bound to the given consumer group id.
 *
 * @param groupId consumer group id for the scheduler
 * @return the new scheduler
 */
static EventScheduler newEvent(String groupId) {
    return new EventScheduler(groupId);
}
// Closes the receiver via dispose(boolean) with a hard-coded 'true'.
// NOTE(review): mirrors the flag that dispose() computes conditionally — verify what
// 'true' selects in dispose(boolean) before relying on close() from the scheduler thread.
void close() { dispose(true); }