/**
 * Returns the {@link KafkaConsumer#subscribe(Collection, ConsumerRebalanceListener)},
 * {@link KafkaConsumer#subscribe(Pattern, ConsumerRebalanceListener)} or
 * {@link KafkaConsumer#assign(Collection)} operation corresponding to the
 * subscription or assignment options configured for this instance.
 *
 * @param listener rebalance listener notified of partition changes, must not be null
 * @return subscribe or assign operation with rebalance listeners corresponding to this options instance
 * @throws IllegalStateException if neither topics, a pattern nor a manual assignment was configured
 */
@NonNull
default Consumer<org.apache.kafka.clients.consumer.Consumer<K, V>> subscriber(@NonNull ConsumerRebalanceListener listener) {
    Objects.requireNonNull(listener);
    if (subscriptionTopics() != null) {
        return kafkaConsumer -> kafkaConsumer.subscribe(subscriptionTopics(), listener);
    }
    if (subscriptionPattern() != null) {
        return kafkaConsumer -> kafkaConsumer.subscribe(subscriptionPattern(), listener);
    }
    if (assignment() != null) {
        // assign() does not fire rebalance callbacks, so the listener is invoked directly
        return kafkaConsumer -> {
            kafkaConsumer.assign(assignment());
            listener.onPartitionsAssigned(assignment());
        };
    }
    throw new IllegalStateException("No subscriptions have been created");
}
/**
 * Returns the {@link KafkaConsumer#subscribe(Collection, ConsumerRebalanceListener)},
 * {@link KafkaConsumer#subscribe(Pattern, ConsumerRebalanceListener)} or
 * {@link KafkaConsumer#assign(Collection)} operation corresponding to the
 * subscription or assignment options configured for this instance.
 *
 * @param listener rebalance listener invoked on partition assignment/revocation
 * @return subscribe or assign operation with rebalance listeners corresponding to this options instance
 * @throws IllegalStateException if neither topics, a pattern nor a manual assignment was configured
 */
@NonNull
default Consumer<org.apache.kafka.clients.consumer.Consumer<K, V>> subscriber(@NonNull ConsumerRebalanceListener listener) {
    Objects.requireNonNull(listener);
    if (subscriptionTopics() != null)
        return consumer -> consumer.subscribe(subscriptionTopics(), listener);
    else if (subscriptionPattern() != null)
        return consumer -> consumer.subscribe(subscriptionPattern(), listener);
    else if (assignment() != null)
        // assign() does not trigger rebalance callbacks, so the listener is notified here
        return consumer -> { consumer.assign(assignment()); listener.onPartitionsAssigned(assignment()); };
    else
        throw new IllegalStateException("No subscriptions have been created");
}
/**
 * Send and receive using a manual partition assignment instead of a group subscription.
 */
@Test
public void manualAssignment() throws Exception {
    receiverOptions = receiverOptions.assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            KafkaReceiver.create(receiverOptions)
                         .receive()
                         .doOnSubscribe(sub -> assignSemaphore.release());
    sendReceiveWithSendDelay(inboundFlux, Duration.ofMillis(1000), 0, 10);
}
/**
 * Send and receive using manual assignment of partitions.
 */
@Test
public void manualAssignment() {
    receiverOptions = receiverOptions.assignment(cluster.partitions(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, record -> {
        // The assign listener must have seen every partition a record arrives on
        TopicPartition partition = record.receiverOffset().topicPartition();
        assertTrue("Assign callback not invoked", assignedPartitions.contains(partition));
        return Mono.just(record);
    });
}
/**
 * Send and receive with manual partition assignment, committing each record explicitly.
 */
@Test
public void manualAssignmentWithCommit() throws Exception {
    // Zero interval and batch size disable automatic commits entirely
    receiverOptions = receiverOptions
            .commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> inboundFlux =
            KafkaReceiver.create(receiverOptions)
                         .receive()
                         .delayUntil(record -> record.receiverOffset().commit())
                         .doOnSubscribe(sub -> assignSemaphore.release());
    sendReceiveWithSendDelay(inboundFlux, Duration.ofMillis(1000), 0, 10);
}
.commitInterval(Duration.ZERO) .commitBatchSize(0) .assignment(topicPartitions) .addAssignListener(partitions -> { for (ReceiverPartition p : partitions) {
.addAssignListener(this::seekToBeginning) .addAssignListener(this::onPartitionsAssigned) .assignment(Collections.singletonList(new TopicPartition(topic, 1))) .commitBatchSize(0) .addAssignListener(this::seekToBeginning) .assignment(Collections.singletonList(new TopicPartition(topic, 0)))
/**
 * Copy constructor: snapshots every setting of the supplied options instance.
 * NOTE(review): argument order must stay in sync with the canonical constructor's
 * parameter list — confirm whenever a new option field is added.
 */
ImmutableReceiverOptions(ReceiverOptions<K, V> options) {
    this(
        options.consumerProperties(),
        options.assignListeners(),
        options.revokeListeners(),
        options.keyDeserializer(),
        options.valueDeserializer(),
        options.pollTimeout(),
        options.closeTimeout(),
        options.commitInterval(),
        options.commitBatchSize(),
        options.atmostOnceCommitAheadSize(),
        options.maxCommitAttempts(),
        options.subscriptionTopics(),
        options.assignment(),
        options.subscriptionPattern(),
        options.schedulerSupplier()
    );
}
/**
 * Copy constructor: snapshots every setting of the supplied options instance.
 * NOTE(review): argument order must stay in sync with the canonical constructor's
 * parameter list — confirm whenever a new option field is added.
 */
ImmutableReceiverOptions(ReceiverOptions<K, V> options) {
    this(
        options.consumerProperties(),
        options.assignListeners(),
        options.revokeListeners(),
        options.keyDeserializer(),
        options.valueDeserializer(),
        options.pollTimeout(),
        options.closeTimeout(),
        options.commitInterval(),
        options.commitBatchSize(),
        options.atmostOnceCommitAheadSize(),
        options.maxCommitAttempts(),
        options.subscriptionTopics(),
        options.assignment(),
        options.subscriptionPattern(),
        options.schedulerSupplier()
    );
}
// Capture the offset consumed so far on this partition
// (assumes consumedOffsets was populated by an earlier receive phase — TODO confirm).
long consumed = consumedOffsets.get(topicPartition);
// Swap in a fresh mock consumer before re-subscribing.
consumerFactory.addConsumer(new MockConsumer(cluster));
// Manually assign only the single partition under test.
receiverOptions = receiverOptions.assignment(Collections.singleton(topicPartition));
inboundFlux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
        .receiveAtmostOnce();