                              Collection<String> topics, java.util.function.Consumer<ConsumerRecord<K, V>> consumerFunction) {
  Properties props = getConsumerProperties(groupId, clientId, autoOffsetReset);
  Thread t = new Thread(() -> {
    LOGGER.debug("Starting consumer {} to read messages", clientId);
@Override
public void start() throws Exception {

  // Kafka setup for the example
  File dataDir = Testing.Files.createTestingDirectory("cluster");
  dataDir.deleteOnExit();
  kafkaCluster = new KafkaCluster()
    .usingDirectory(dataDir)
    .withPorts(2181, 9092)
    .addBrokers(1)
    .deleteDataPriorToStartup(true)
    .startup();

  // Deploy the dashboard
  JsonObject consumerConfig = new JsonObject((Map) kafkaCluster.useTo()
    .getConsumerProperties("the_group", "the_client", OffsetResetStrategy.LATEST));
  vertx.deployVerticle(
    DashboardVerticle.class.getName(),
    new DeploymentOptions().setConfig(consumerConfig)
  );

  // Deploy the metrics collector: 3 instances
  JsonObject producerConfig = new JsonObject((Map) kafkaCluster.useTo()
    .getProducerProperties("the_producer"));
  vertx.deployVerticle(
    MetricsVerticle.class.getName(),
    new DeploymentOptions().setConfig(producerConfig).setInstances(3)
  );
}
private Properties setupConsumeWithHeaders(TestContext ctx, int numMessages, String topicName) {
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.get(),
      Collections.singletonList(new RecordHeader("header_key" + index.get(),
        ("header_value" + index.getAndIncrement()).getBytes()))));
  batch.awaitSuccess(20000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  return config;
}
@Test
public void testSubscription(TestContext ctx) throws Exception {
  String topicName = "testSubscription";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.handler(record -> {
    // no need for handling incoming records in this test
  });

  consumer.subscribe(Collections.singleton(topicName), asyncResult -> {
    if (asyncResult.succeeded()) {
      consumer.subscription(asyncResult1 -> {
        if (asyncResult1.succeeded()) {
          ctx.assertTrue(asyncResult1.result().contains(topicName));
          done.complete();
        } else {
          ctx.fail();
        }
      });
    } else {
      ctx.fail();
    }
  });
}
@Test
public void testListTopics(TestContext ctx) throws Exception {
  String topicName = "testListTopics";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.handler(record -> {
    // no need for handling incoming records in this test
  });

  consumer.subscribe(Collections.singleton(topicName), asyncResult -> {
    if (asyncResult.succeeded()) {
      consumer.listTopics(asyncResult1 -> {
        if (asyncResult1.succeeded()) {
          ctx.assertTrue(asyncResult1.result().containsKey(topicName));
          done.complete();
        } else {
          ctx.fail();
        }
      });
    } else {
      ctx.fail();
    }
  });
}
@Test
public void testPollTimeout(TestContext ctx) throws Exception {
  Async async = ctx.async();
  String topicName = "testPollTimeout";
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0);
  KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config);

  int pollingTimeout = 1500;
  // Set the polling timeout to 1500 ms (default is 1000)
  consumerWithCustomTimeout.pollTimeout(pollingTimeout);
  // Subscribe to the empty topic (we want the poll() call to timeout!)
  consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> {
    consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once
    long beforeSeek = System.currentTimeMillis();
    consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> {
      long durationWShortTimeout = System.currentTimeMillis() - beforeSeek;
      ctx.assertTrue(durationWShortTimeout >= pollingTimeout,
        "Operation must take at least as long as the polling timeout");
      consumerWithCustomTimeout.close();
      async.countDown();
    });
  });
}
@Test
public void testNotCommitted(TestContext ctx) throws Exception {
  String topicName = "testNotCommitted";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Async done = ctx.async();

  KafkaConsumer<Object, Object> consumer = KafkaConsumer.create(vertx, config);
  consumer.handler(rec -> {});
  consumer.partitionsAssignedHandler(partitions -> {
    for (io.vertx.kafka.client.common.TopicPartition partition : partitions) {
      consumer.committed(partition, ar -> {
        if (ar.succeeded()) {
          ctx.assertNull(ar.result());
        } else {
          ctx.fail(ar.cause());
        }
      });
    }
    done.complete();
  });
  consumer.subscribe(Collections.singleton(topicName));
}
@Test
public void testBatchHandler(TestContext ctx) throws Exception {
  String topicName = "testBatchHandler";
  String consumerId = topicName;
  Async batch1 = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 500;
  kafkaCluster.useTo().produceStrings(numMessages, batch1::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch1.awaitSuccess(10000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async batchHandler = ctx.async();
  consumer.batchHandler(records -> {
    ctx.assertEquals(numMessages, records.count());
    batchHandler.complete();
  });
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {});
  consumer.subscribe(Collections.singleton(topicName));
}
@Test
public void testConsume(TestContext ctx) throws Exception {
  final String topicName = "testConsume";
  String consumerId = topicName;
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 1000;
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch.awaitSuccess(20000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  consumer = createConsumer(vertx, config);

  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  consumer.subscribe(Collections.singleton(topicName));
}
@Test
public void testPartitionsFor(TestContext ctx) throws Exception {
  String topicName = "testPartitionsFor";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 2, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.partitionsFor(topicName, ar -> {
    if (ar.succeeded()) {
      List<PartitionInfo> partitionInfo = ar.result();
      ctx.assertEquals(2, partitionInfo.size());
    } else {
      ctx.fail();
    }
    done.complete();
  });
}
@Test
// Regression test for ISS-73: undeployment of a verticle with unassigned consumer fails
public void testUndeployUnassignedConsumer(TestContext ctx) {
  Properties config = kafkaCluster.useTo().getConsumerProperties("testUndeployUnassignedConsumer_consumer",
    "testUndeployUnassignedConsumer_consumer", OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Async async = ctx.async(1);
  vertx.deployVerticle(new AbstractVerticle() {
    @Override
    public void start() {
      // Create a consumer that never subscribes, so it is never assigned any partition
      KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
    }
  }, ctx.asyncAssertSuccess(id -> {
    vertx.undeploy(id, ctx.asyncAssertSuccess(v2 -> async.complete()));
  }));
  async.awaitSuccess(10000);

  // Undeploying the verticle must also release the consumer's polling thread
  waitUntil("Expected " + countThreads("vert.x-kafka-consumer-thread") + " == " + numVertxKafkaConsumerThread,
    () -> countThreads("vert.x-kafka-consumer-thread") == numVertxKafkaConsumerThread);
}
@Test
public void testPollExceptionHandler(TestContext ctx) throws Exception {
  Properties config = kafkaCluster.useTo().getConsumerProperties("someRandomGroup", "someRandomClientID", OffsetResetStrategy.EARLIEST);
  // Without a group.id, subscribing makes poll() fail with InvalidGroupIdException
  config.remove("group.id");
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumer = createConsumer(vertx, config);

  Async done = ctx.async();
  consumer.exceptionHandler(ex -> {
    ctx.assertTrue(ex instanceof InvalidGroupIdException);
    done.complete();
  });
  consumer.subscribe(Collections.singleton("someTopic")).handler(System.out::println);
}