/**
 * Subscribe to the given list of topics to get dynamically assigned partitions.
 *
 * @param topics topics to subscribe to
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(Set<String> topics) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topics);
  return this;
}
/**
 * Subscribe to the given topic to get dynamically assigned partitions.
 *
 * @param topic topic to subscribe to
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(String topic) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topic);
  return this;
}
/**
 * Subscribe to the given list of topics to get dynamically assigned partitions.
 *
 * @param topics topics to subscribe to
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(Set<String> topics) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topics);
  return this;
}
/**
 * Subscribe to the given topic to get dynamically assigned partitions.
 *
 * @param topic topic to subscribe to
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(String topic) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topic);
  return this;
}
/**
 * Subscribe to the given list of topics to get dynamically assigned partitions.
 * <p>
 * Due to internal buffering of messages, when changing the subscribed topics
 * the old set of topics may remain in effect
 * (as observed by the record handler)
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * has been called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new set of topics.
 *
 * @param topics topics to subscribe to
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(Set<String> topics, Handler<AsyncResult<Void>> completionHandler) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topics, completionHandler);
  return this;
}
/**
 * Subscribe to the given topic to get dynamically assigned partitions.
 * <p>
 * Due to internal buffering of messages, when changing the subscribed topic
 * the old topic may remain in effect
 * (as observed by the record handler)
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * has been called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new topic.
 *
 * @param topic topic to subscribe to
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(String topic, Handler<AsyncResult<Void>> completionHandler) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topic, completionHandler);
  return this;
}
/**
 * Subscribe to the given list of topics to get dynamically assigned partitions.
 * <p>
 * Due to internal buffering of messages, when changing the subscribed topics
 * the old set of topics may remain in effect
 * (as observed by the record handler)
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * has been called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new set of topics.
 *
 * @param topics topics to subscribe to
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(Set<String> topics, Handler<AsyncResult<Void>> completionHandler) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topics, completionHandler);
  return this;
}
/**
 * Subscribe to the given topic to get dynamically assigned partitions.
 * <p>
 * Due to internal buffering of messages, when changing the subscribed topic
 * the old topic may remain in effect
 * (as observed by the record handler)
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * has been called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new topic.
 *
 * @param topic topic to subscribe to
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> subscribe(String topic, Handler<AsyncResult<Void>> completionHandler) {
  // Forward directly to the underlying (non-rx) consumer; returning this enables fluent chaining.
  delegate.subscribe(topic, completionHandler);
  return this;
}
/**
 * Verticle entry point: creates a Kafka consumer against a local broker,
 * logs every received record to stdout, and subscribes to "my-topic".
 *
 * @param startFuture completed immediately after the subscription is issued
 *                    (subscription itself completes asynchronously)
 * @throws Exception per the Verticle contract; nothing is thrown here directly
 */
@Override
public void start(Future<Void> startFuture) throws Exception {
  // Minimal consumer configuration for a broker running on localhost.
  Map<String, String> config = new HashMap<>();
  config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
  config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
  config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

  // Fix: use the parameterized type instead of the raw KafkaConsumer
  // (both deserializers are StringDeserializer, so <String, String> is correct
  // and avoids the unchecked/raw-type warning).
  KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
  consumer.handler(record -> {
    System.out.println(record);
  });
  consumer.subscribe("my-topic");
  startFuture.complete();
}
}
/**
 * Consumes records in batches and validates each full batch via
 * {@code handleAndCheckData}.
 */
@Test
public void test(TestContext context) {
  // Wire up the shared test fixtures.
  this.vertx = Vertx.vertx();
  this.context = context;
  this.async = context.async();

  Map<String, String> cfg = createConsumerConfig();
  KafkaConsumer<String, String> kafkaConsumer = KafkaConsumer.create(vertx, cfg);

  // Accumulates records and hands a full batch to the checker callback.
  TestBatchHandler batchHandler =
    new TestBatchHandler(BATCH_SIZE, 10000, this::handleAndCheckData, kafkaConsumer);

  kafkaConsumer.handler(batchHandler::add);
  kafkaConsumer.exceptionHandler(err -> logger.error("consumer error", err));
  kafkaConsumer.subscribe(TOPIC);
}
/**
 * Verifies that each of the {@code numMessages} produced records carries exactly
 * one header, and that header keys/values arrive in production order.
 */
@Test
public void testConsumerWithHeader(TestContext ctx) {
  int numMessages = 1000;
  String topicName = "testConsumerWithHeader";
  Properties config = setupConsumeWithHeaders(ctx, numMessages, topicName);
  consumer = createConsumer(vertx, config);
  // Fix: the local was previously also named "consumer", shadowing the field
  // assigned on the line above; renamed to wrappedConsumer (matching the naming
  // used elsewhere in this suite) so field and local stay distinct.
  KafkaConsumer<String, String> wrappedConsumer = new KafkaConsumerImpl<>(this.consumer);
  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  AtomicInteger headerIndex = new AtomicInteger();
  wrappedConsumer.exceptionHandler(ctx::fail);
  wrappedConsumer.handler(rec -> {
    // Each record carries exactly one header: header_key<i> / header_value<i>;
    // records arrive in order, so one incrementing index checks key and value.
    List<KafkaHeader> headers = rec.headers();
    ctx.assertEquals(1, headers.size());
    KafkaHeader header = headers.get(0);
    ctx.assertEquals("header_key" + headerIndex.get(), header.key());
    ctx.assertEquals("header_value" + headerIndex.getAndIncrement(), header.value().toString());
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  wrappedConsumer.subscribe(Collections.singleton(topicName));
}
/**
 * Verifies that each of the {@code numMessages} produced records carries exactly
 * one header, and that header keys/values arrive in production order.
 */
@Test
public void testConsumerWithHeader(TestContext ctx) {
  int numMessages = 1000;
  String topicName = "testConsumerWithHeader";
  Properties config = setupConsumeWithHeaders(ctx, numMessages, topicName);
  consumer = createConsumer(vertx, config);
  // Fix: the local was previously also named "consumer", shadowing the field
  // assigned on the line above; renamed to wrappedConsumer (matching the naming
  // used elsewhere in this suite) so field and local stay distinct.
  KafkaConsumer<String, String> wrappedConsumer = new KafkaConsumerImpl<>(this.consumer);
  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  AtomicInteger headerIndex = new AtomicInteger();
  wrappedConsumer.exceptionHandler(ctx::fail);
  wrappedConsumer.handler(rec -> {
    // Each record carries exactly one header: header_key<i> / header_value<i>;
    // records arrive in order, so one incrementing index checks key and value.
    List<KafkaHeader> headers = rec.headers();
    ctx.assertEquals(1, headers.size());
    KafkaHeader header = headers.get(0);
    ctx.assertEquals("header_key" + headerIndex.get(), header.key());
    ctx.assertEquals("header_value" + headerIndex.getAndIncrement(), header.value().toString());
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  wrappedConsumer.subscribe(Collections.singleton(topicName));
}
/**
 * Verifies that {@code committed()} reports {@code null} for a partition on
 * which no offset has ever been committed.
 */
@Test
public void testNotCommitted(TestContext ctx) throws Exception {
  String topicName = "testNotCommitted";
  String consumerId = topicName;
  // Fresh single-partition topic: guaranteed to have no committed offsets.
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Async done = ctx.async();
  KafkaConsumer<Object, Object> consumer = KafkaConsumer.create(vertx, config);
  // Records themselves are irrelevant here; a handler is still required to start polling.
  consumer.handler(rec -> {});
  consumer.partitionsAssignedHandler(partitions -> {
    for (io.vertx.kafka.client.common.TopicPartition partition : partitions) {
      consumer.committed(partition, ar -> {
        if (ar.succeeded()) {
          // Nothing has been committed, so the committed offset must be null.
          ctx.assertNull(ar.result());
        } else {
          ctx.fail(ar.cause());
        }
      });
    }
    // NOTE(review): completes on the first assignment without awaiting the
    // committed() callbacks; this assumes the TestContext still reports late
    // assertion failures and that no rebalance re-fires this handler — confirm.
    done.complete();
  });
  consumer.subscribe(Collections.singleton(topicName));
}
AtomicInteger count = new AtomicInteger(5); consumer.subscribe(Collections.singleton(topicName), subscribeResult -> {
/**
 * Verifies that {@code committed()} reports {@code null} for a partition on
 * which no offset has ever been committed.
 */
@Test
public void testNotCommitted(TestContext ctx) throws Exception {
  String topicName = "testNotCommitted";
  String consumerId = topicName;
  // Fresh single-partition topic: guaranteed to have no committed offsets.
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Async done = ctx.async();
  KafkaConsumer<Object, Object> consumer = KafkaConsumer.create(vertx, config);
  // Records themselves are irrelevant here; a handler is still required to start polling.
  consumer.handler(rec -> {});
  consumer.partitionsAssignedHandler(partitions -> {
    for (io.vertx.kafka.client.common.TopicPartition partition : partitions) {
      consumer.committed(partition, ar -> {
        if (ar.succeeded()) {
          // Nothing has been committed, so the committed offset must be null.
          ctx.assertNull(ar.result());
        } else {
          ctx.fail(ar.cause());
        }
      });
    }
    // NOTE(review): completes on the first assignment without awaiting the
    // committed() callbacks; this assumes the TestContext still reports late
    // assertion failures and that no rebalance re-fires this handler — confirm.
    done.complete();
  });
  consumer.subscribe(Collections.singleton(topicName));
}
AtomicInteger count = new AtomicInteger(5); consumer.subscribe(Collections.singleton(topicName), subscribeResult -> {
@Test public void testPollTimeout(TestContext ctx) throws Exception { Async async = ctx.async(); String topicName = "testPollTimeout"; Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST); config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0); KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config); int pollingTimeout = 1500; // Set the polling timeout to 1500 ms (default is 1000) consumerWithCustomTimeout.pollTimeout(pollingTimeout); // Subscribe to the empty topic (we want the poll() call to timeout!) consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> { consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once long beforeSeek = System.currentTimeMillis(); consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> { long durationWShortTimeout = System.currentTimeMillis() - beforeSeek; ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout"); consumerWithCustomTimeout.close(); async.countDown(); }); }); }
AtomicInteger count = new AtomicInteger(numMessages); consumer.subscribe(Collections.singleton(topicName), subscribeResult -> {
@Test public void testPollTimeout(TestContext ctx) throws Exception { Async async = ctx.async(); String topicName = "testPollTimeout"; Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST); config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0); KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config); int pollingTimeout = 1500; // Set the polling timeout to 1500 ms (default is 1000) consumerWithCustomTimeout.pollTimeout(pollingTimeout); // Subscribe to the empty topic (we want the poll() call to timeout!) consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> { consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once long beforeSeek = System.currentTimeMillis(); consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> { long durationWShortTimeout = System.currentTimeMillis() - beforeSeek; ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout"); consumerWithCustomTimeout.close(); async.countDown(); }); }); }
}); wrappedConsumer.handler(rec -> {}); wrappedConsumer.subscribe(Collections.singleton(topicName));