@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Only deliver records from committed transactions; aborted records are skipped.
    return super.receiverOptions()
                .consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
}

public ProducerRecord<Integer, Person> transform(Person p) {
/**
 * Configures the receiver for transactional consumption: small polls (at most
 * 10 records), consuming from the earliest offset, and reading only records
 * from committed transactions.
 */
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    return super.receiverOptions()
                .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10")
                .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
                .consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
}

@Override
/**
 * Looks up a consumer property from the receiver options, falling back to the
 * supplied default when the property is not configured. The result is always
 * rendered as a string.
 */
private String getProperty(String propName, Object defaultValue) {
    Object configured = receiverOptions.consumerProperty(propName);
    return String.valueOf(configured == null ? defaultValue : configured);
}
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}
};
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}
};
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}
};
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}
};
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}
};
// Missing @Override added: this method overrides the superclass (it calls
// super.receiverOptions()), so the annotation lets the compiler verify the signature.
@Override
public ReceiverOptions<Integer, Person> receiverOptions() {
    // Start from the earliest available offset when no committed offset exists.
    return super.receiverOptions().consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
}

@Override
/**
 * Verifies that the session does not time out even though processing a record
 * takes longer than the session timeout, because the Kafka consumer's
 * background heartbeating keeps the session alive. The receiver's own
 * heartbeat flux is disabled in this mode.
 */
@Test
public void autoHeartbeat() throws Exception {
    long sessionTimeout = 500;
    consumer = new MockConsumer(cluster);
    consumerFactory = new MockConsumer.Pool(Arrays.asList(consumer));
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, String.valueOf(sessionTimeout))
            .consumerProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "100")
            .subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 10);
    DefaultKafkaReceiver<Integer, String> kafkaReceiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    // Processing stalls longer than the session timeout after the first record;
    // the remaining nine records must still be delivered.
    receiveWithOneOffAction(kafkaReceiver, 1, 9, () -> TestUtils.sleep(sessionTimeout + 500));
}
/** Back-pressure check for {@code receiveAutoAck()}: one record per poll so downstream demand throttles the consumer. */
@Test
public void backPressureReceiveAutoAck() throws Exception {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> inboundFlux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receiveAutoAck();
    testBackPressure(inboundFlux);
}
/** Back-pressure check for {@code receiveAtmostOnce()}: one record per poll so downstream demand throttles the consumer. */
@Test
public void backPressureReceiveAtmostOnce() throws Exception {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> inboundFlux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receiveAtmostOnce();
    testBackPressure(inboundFlux);
}
/** Back-pressure check for {@code receive()}: one record per poll so downstream demand throttles the consumer. */
@Test
public void backPressureReceive() throws Exception {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> inboundFlux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receive();
    testBackPressure(inboundFlux);
}
/**
 * Consumes from the first available offset of every partition by seeking to the
 * beginning in the assign listener, even though auto.offset.reset is "latest".
 */
@Test
public void seekToBeginning() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assigned = new Semaphore(0);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> {
                partitions.forEach(ReceiverPartition::seekToBeginning);
                assigned.release();
            })
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> kafkaReceiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    // All 10 pre-existing records must be delivered despite auto.offset.reset=latest.
    receiveWithOneOffAction(kafkaReceiver, 10, 10, () -> sendMessages(topic, 10, 20));
    assertTrue("Assign callback not invoked", assigned.tryAcquire(1, TimeUnit.SECONDS));
}
/**
 * Verifies redelivery behaviour when auto-commit hits a non-retriable commit
 * exception: records after the failure point are delivered again.
 */
@Test
public void autoCommitNonRetriableException() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    testAutoCommitFailureScenarios(false, count, 2, 0, 10);

    Flux<? extends ConsumerRecord<Integer, String>> inbound =
            createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceiveWithRedelivery(inbound, count, count, 3, 5);
}
@Test public void sendTransactionalReadUncommitted() throws Exception { receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted"); int count = 100; CountDownLatch latch1 = new CountDownLatch(count); CountDownLatch latch2 = new CountDownLatch(count * 2); CountDownLatch latch3 = new CountDownLatch(count * 3); subscribe(createReceiver().receive(), latch1, latch2, latch3); sendMessages(0, count); waitForMessages(latch1); // non-transactional messages received KafkaSender<Integer, String> txSender = createTransactionalSender(); txSender.sendTransactionally(Flux.just(createSenderRecords(count, count, true))) .then().block(Duration.ofSeconds(receiveTimeoutMillis)); waitForMessages(latch2); // transactional messages received before commit sendMessages(count * 2, count); waitForMessages(latch3); checkConsumedMessages(0, count * 3); }
/**
 * Verifies that a "read_committed" consumer receives no transactional record
 * until the transaction commits: while send results stream in, the latch must
 * still be at its initial count.
 */
@Test
public void sendTransactionalReadCommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(createReceiver().receive(), latch);

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    // FIX: receiveTimeoutMillis holds a millisecond value; Duration.ofSeconds(...)
    // made the blockLast() deadline ~1000x too long. Use ofMillis to match the unit.
    txSender.sendTransactionally(Flux.just(createSenderRecords(0, count, true)))
            .doOnNext(result -> assertEquals(count, latch.getCount())) // nothing delivered pre-commit
            .blockLast(Duration.ofMillis(receiveTimeoutMillis));
    waitForMessages(latch);
    checkConsumedMessages(0, count);
}
/**
 * Verifies that a commit failure is propagated to the receiver once the
 * configured maximum number of commit attempts is exhausted, and that the
 * affected records are redelivered.
 */
@Test
public void autoCommitFailurePropagationAfterRetries() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .maxCommitAttempts(2);
    testAutoCommitFailureScenarios(true, count, 2, 0, Integer.MAX_VALUE);

    Flux<? extends ConsumerRecord<Integer, String>> inbound = createReceiver().receive();
    sendReceiveWithRedelivery(inbound, count, count, 2, 5);
}
@Test public void atleastOnceClose() throws Exception { receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000)) .commitBatchSize(10) .commitInterval(Duration.ofMillis(60000)) .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> { if (receivedMessages.get(record.partition()).size() < 10) record.receiverOffset().acknowledge(); }); sendReceive(fluxWithAck, 0, 100, 0, 100); // Check that close commits ack'ed records, does not commit un-ack'ed records cancelSubscriptions(true); clearReceivedMessages(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r); sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions)); }
/**
 * With auto.offset.reset=latest, records sent before partition assignment must
 * not be delivered; only the records sent after assignment are received.
 */
@Test
public void offsetResetLatest() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> assignSemaphore.release());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
            .receive()
            .doOnNext(record -> onReceive(record));
    StepVerifier.create(kafkaFlux)
                .then(() -> assignSemaphore.acquireUninterruptibly())
                .expectNoEvent(Duration.ofMillis(100)) // pre-assignment records stay hidden
                .then(() -> sendMessages(count, count))
                .expectNextCount(count)
                .thenCancel()
                // FIX: receiveTimeoutMillis holds a millisecond value; Duration.ofSeconds(...)
                // made the verification deadline ~1000x too long. Use ofMillis to match the unit.
                .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkConsumedMessages(count, count);
}