private void seekToBeginning(Collection<ReceiverPartition> partitions) {
    for (ReceiverPartition partition : partitions)
        partition.seekToBeginning();
    assertEquals(topic, partitions.iterator().next().topicPartition().topic());
    assignSemaphore.release();
}
ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions = ReceiverOptions.<byte[], byte[]>create(consumerProps)
            .addAssignListener(partitions -> {
                if (assignSemaphore.availablePermits() == 0) {
                    partitions.forEach(p -> p.seekToEnd());
                    assignSemaphore.release();
                }
            })
            .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions)
            .receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}

public void initialize() {
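The constructor above seeks to the end only on the first assignment, gated by the semaphore, so a later rebalance does not skip in-flight benchmark records. A minimal standalone sketch of that pattern, assuming a local broker at localhost:9092 and the illustrative topic name "latency-topic":

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Semaphore;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;

public class SeekToEndOnFirstAssign {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "latency-sample");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        Semaphore assignSemaphore = new Semaphore(0);
        ReceiverOptions<byte[], byte[]> options = ReceiverOptions.<byte[], byte[]>create(props)
                .addAssignListener(partitions -> {
                    // Seek only on the first assignment; later rebalances keep their positions.
                    if (assignSemaphore.availablePermits() == 0) {
                        partitions.forEach(p -> p.seekToEnd());
                        assignSemaphore.release();
                    }
                })
                .subscription(Collections.singleton("latency-topic")); // assumed topic name
        KafkaReceiver.create(options)
                .receive()
                .doOnNext(r -> System.out.println("offset=" + r.offset()))
                .blockLast(); // runs until the flux terminates
    }
}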
.addAssignListener(partitions -> {
    for (ReceiverPartition p : partitions) {
        p.seekToBeginning();
/**
 * Tests that a failed seek in the assign listener terminates the inbound flux with an error.
 */
@Test
public void seekFailure() throws Exception {
    sendMessages(topic, 0, 10);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seek(20);
            })
            .subscription(Collections.singleton(topic));
    receiveVerifyError(InvalidOffsetException.class);
}
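From the application side, the behavior this test pins down means a bad seek surfaces as a terminal error signal rather than a silent hang. A hedged sketch of observing it, reusing the fixture's receiverOptions, topic, and log fields; whether the seek actually fails depends on how the consumer resolves out-of-range offsets (the mock consumer in these tests raises InvalidOffsetException):

receiverOptions = receiverOptions
        .addAssignListener(partitions -> partitions.forEach(p -> p.seek(20))) // 20 may be past the log end
        .subscription(Collections.singleton(topic));
KafkaReceiver.create(receiverOptions)
        .receive()
        .doOnError(e -> log.error("Inbound flux terminated by assign-listener seek", e))
        .subscribe();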
private void onPartitionsAssigned(Collection<ReceiverPartition> partitions) {
    assertEquals(topic, partitions.iterator().next().topicPartition().topic());
    assignSemaphore.release();
}
private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions,
        List<Person> received) {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .addAssignListener(partitions -> {
                log.debug("Group {} assigned {}", groupId, partitions);
                partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
            })
            .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
            .receive()
            .subscribe(m -> {
                Person p = m.value();
                received.add(p);
                log.debug("Thread {} Received from {}: {} ", Thread.currentThread().getName(), m.topic(), p);
            });
    disposables.add(c);
}

private CommittableSource createTestSource(int count, List<Person> expected) {
/**
 * Consume from the first available offset of each partition by seeking to the start of all
 * partitions in the assign listener.
 */
@Test
public void seekToBeginning() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToBeginning();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    receiveWithOneOffAction(receiver, 10, 10, () -> sendMessages(topic, 10, 20));
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
}
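The same replay-from-the-start idiom, pulled out of the test as a reusable helper. This is a sketch that assumes options already carries bootstrap servers, group id, and deserializers:

// Hedged sketch: returns options configured to re-read every assigned partition
// from its first available offset, ignoring committed offsets.
static <K, V> ReceiverOptions<K, V> replayFromStart(ReceiverOptions<K, V> options, String topic) {
    return options
            .addAssignListener(partitions -> partitions.forEach(ReceiverPartition::seekToBeginning))
            .subscription(Collections.singleton(topic));
}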
/**
 * Consume from a specific offset of each partition by seeking to that offset in the assign listener.
 */
@Test
public void seekToOffset() throws Exception {
    sendMessages(topic, 0, 10);
    long startOffset = 2;
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seek(startOffset);
                assignSemaphore.release();
            });
    int receiveCount = 10;
    for (TopicPartition partition : cluster.partitions(topic)) {
        receiveStartOffsets.put(partition, startOffset);
        receiveCount += cluster.log(partition).size() - startOffset;
    }
    CountDownLatch latch = new CountDownLatch(receiveCount);
    Disposable disposable = asyncReceive(latch);
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    sendMessages(topic, 10, 20);
    assertTrue("Messages not received", latch.await(1, TimeUnit.SECONDS));
    verifyMessages(receiveCount);
    disposable.dispose();
}
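A common production variant of this technique seeks each partition to an offset recovered from an external store. A sketch, assuming a hypothetical offsetStore map keyed by TopicPartition; partitions without a stored offset keep the consumer's committed position:

Map<TopicPartition, Long> offsetStore = loadOffsetsFromExternalStore(); // hypothetical helper
receiverOptions = receiverOptions
        .addAssignListener(partitions -> {
            for (ReceiverPartition p : partitions) {
                Long offset = offsetStore.get(p.topicPartition());
                if (offset != null)
                    p.seek(offset); // resume from the externally recorded offset
                // otherwise fall through to the consumer's committed position
            }
        })
        .subscription(Collections.singleton(topic));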
@Before
public void setUp() {
    topics = new ConcurrentHashMap<>();
    for (int i : Arrays.asList(1, 2, 20, 200))
        topics.put(i, "topic" + i);
    topic = topics.get(2);
    cluster = new MockCluster(2, topics);
    receiverOptions = ReceiverOptions.<Integer, String>create()
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.add(p.topicPartition());
            })
            .addRevokeListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.remove(p.topicPartition());
            });
    consumer = new MockConsumer(cluster);
    consumerFactory = new MockConsumer.Pool(Arrays.asList(consumer));
    for (TopicPartition partition : cluster.partitions())
        receiveStartOffsets.put(partition, 0L);
}
.addAssignListener(partitions -> {
    for (ReceiverPartition p : partitions) {
        p.seekToBeginning();
        assignedPartitions.put(p.topicPartition(), null);
/**
 * Consume from the latest offset of each partition by seeking to the end of all partitions
 * in the assign listener.
 */
@Test
public void seekToEnd() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToEnd();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    for (TopicPartition partition : cluster.partitions(topic))
        receiveStartOffsets.put(partition, (long) cluster.log(partition).size());
    CountDownLatch latch = new CountDownLatch(10);
    Disposable disposable = asyncReceive(latch);
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    sendMessages(topic, 10, 20);
    assertTrue("Messages not received", latch.await(1, TimeUnit.SECONDS));
    verifyMessages(10);
    disposable.dispose();
}
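When seeking in an assign listener, ReceiverPartition.position() (the same accessor the subscribeToDestTopic sample logs above) can confirm the seek took effect before any records are emitted. A hedged variant of the listener, assuming a log field as in the other samples:

.addAssignListener(partitions -> {
    for (ReceiverPartition p : partitions) {
        p.seekToEnd();
        // position() now reports the offset the next poll will start from
        log.debug("Partition {} starts at {}", p.topicPartition(), p.position());
    }
})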
@Test
public void consumerClose() throws Exception {
    int count = 10;
    for (int i = 0; i < 2; i++) {
        Collection<ReceiverPartition> seekablePartitions = new ArrayList<>();
        receiverOptions = receiverOptions
                .addAssignListener(partitions -> {
                    seekablePartitions.addAll(partitions);
                    assignSemaphore.release();
                })
                .subscription(Collections.singletonList(topic));
        KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
        Flux<ConsumerRecord<Integer, String>> kafkaFlux = receiver
                .receiveAutoAck()
                .concatMap(r -> r);
        Disposable disposable = sendAndWaitForMessages(kafkaFlux, count);
        assertTrue("No partitions assigned", seekablePartitions.size() > 0);
        if (i == 0)
            waitForCommits(receiver, count);
        disposable.dispose();
        try {
            seekablePartitions.iterator().next().seekToBeginning();
            fail("Consumer not closed");
        } catch (IllegalStateException e) {
            // expected exception
        }
    }
}
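The test above shows why ReceiverPartition handles must not outlive the receiver: once the underlying consumer is closed, any seek throws IllegalStateException. The safer pattern, sketched below, is to do all seeking inside the listener callback, where the handle is guaranteed to be live, instead of stashing references for later use:

receiverOptions = receiverOptions
        .addAssignListener(partitions -> {
            // Handles are valid here; do not retain them past the callback.
            partitions.forEach(ReceiverPartition::seekToBeginning);
        })
        .subscription(Collections.singletonList(topic));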
@Test
public void seekToOffset() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                onPartitionsAssigned(partitions);
                for (ReceiverPartition partition : partitions)
                    partition.seek(1);
            })
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnError(e -> log.error("KafkaFlux exception", e));
    sendReceive(kafkaFlux, count, count, partitions, count * 2 - partitions);
}
@Before
public void setUp() {
    cluster = new MockCluster(2, Collections.emptyMap());
    cluster.addTopic(srcTopic, partitions);
    cluster.addTopic(destTopic, partitions);
    receiverOptions = ReceiverOptions.<Integer, String>create()
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords))
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.add(p.topicPartition());
            })
            .addRevokeListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.remove(p.topicPartition());
            })
            .subscription(Collections.singleton(srcTopic));
    consumerFactory = new MockConsumer.Pool(Arrays.asList(new MockConsumer(cluster), new MockConsumer(cluster)));
    receiver = new DefaultKafkaReceiver<Integer, String>(consumerFactory, receiverOptions);
    for (TopicPartition partition : cluster.partitions())
        receiveStartOffsets.put(partition, 0L);
    SenderOptions<Integer, String> senderOptions = SenderOptions.<Integer, String>create()
            .producerProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "exactlyOnce");
    producer = new MockProducer(cluster);
    Pool producerFactory = new Pool(Arrays.asList(producer));
    sender = new DefaultKafkaSender<>(producerFactory, senderOptions);
}
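This fixture wires a transactional sender to a read_committed receiver; the exactly-once flow they support pairs receiveExactlyOnce with the sender's transaction manager, so each outbound batch commits atomically with its consumed offsets. A sketch of that pipeline, where toSenderRecord is a hypothetical mapper from inbound records to destTopic producer records:

receiver.receiveExactlyOnce(sender.transactionManager())
        .concatMap(batch -> sender.send(batch.map(r -> toSenderRecord(r))) // toSenderRecord: hypothetical mapper
                .concatWith(sender.transactionManager().commit()))
        .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
        .subscribe();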
@Test
public void seekToEnd() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(partitions -> {
                for (ReceiverPartition partition : partitions)
                    partition.seekToEnd();
                onPartitionsAssigned(partitions);
            })
            .subscription(Collections.singletonList(topic));
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive();
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(100), count, count);
}