/**
 * Asynchronously consume messages on the given topic from the cluster until the expected
 * number of matching messages has been read or the timeout expires.
 *
 * @param topicName the name of the topic; may not be null
 * @param count the expected number of messages to read before terminating; must be positive
 * @param timeout the maximum time that this consumer should run before terminating; must be positive
 * @param unit the unit of time for the timeout; may not be null
 * @param completion the function to call when all messages have been consumed; may be null
 * @param consumer the function to consume the messages; only records for which it returns
 *        {@code true} count toward {@code count}; may not be null
 */
public void consumeStrings(String topicName, int count, long timeout, TimeUnit unit, Runnable completion, BiPredicate<String, String> consumer) {
    // Counts only the records the predicate accepted; drives the termination condition below.
    AtomicLong readCounter = new AtomicLong();
    consumeStrings(continueIfNotExpired(() -> readCounter.get() < count, timeout, unit),
        completion,
        Collections.singleton(topicName),
        record -> {
            if (consumer.test(record.key(), record.value())) readCounter.incrementAndGet();
        });
}
// Consumes numMessages records from topicName and asserts each record's key, value and
// "header_key" header follow the sequential pattern key-<i> / value-<i> / header_value-<i>.
// Completes the Async once all expected records have been checked.
private void assertReceiveMessages(TestContext ctx, String topicName, int numMessages) {
    Async done = ctx.async();
    // seq doubles as the termination predicate input and the expected-sequence index.
    AtomicInteger seq = new AtomicInteger();
    kafkaCluster.useTo().consumeStrings(() -> seq.get() < numMessages, done::complete, Collections.singleton(topicName), record -> {
        int count = seq.getAndIncrement();
        ctx.assertEquals("key-" + count, record.key());
        ctx.assertEquals("value-" + count, record.value());
        // NOTE(review): new String(byte[]) uses the platform charset — presumably the
        // producer wrote ASCII header values, so this is safe; confirm against the producer.
        ctx.assertEquals("header_value-" + count, new String(record.headers().headers("header_key").iterator().next().value()));
    });
}
/**
 * Reads {@code numMessages} records from {@code topicName} and checks that each record's
 * key, value and "header_key" header match the expected sequential values.
 */
private void assertReceiveMessages(TestContext ctx, String topicName, int numMessages) {
    Async received = ctx.async();
    AtomicInteger index = new AtomicInteger();
    kafkaCluster.useTo().consumeStrings(
        () -> index.get() < numMessages,
        received::complete,
        Collections.singleton(topicName),
        record -> {
            int expected = index.getAndIncrement();
            ctx.assertEquals("key-" + expected, record.key());
            ctx.assertEquals("value-" + expected, record.value());
            byte[] headerValue = record.headers().headers("header_key").iterator().next().value();
            ctx.assertEquals("header_value-" + expected, new String(headerValue));
        });
}
/**
 * Verifies that a KafkaProducer created inside a verticle is closed when the verticle is
 * undeployed: after undeploy, the producer's network thread count must return to baseline.
 *
 * Fixes: removed the AtomicReference<KafkaProducer> that was written but never read, and
 * uses the serializer class NAME for config values, consistent with the sibling tests.
 */
@Test public void testCleanupInProducer(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    Async deployLatch = ctx.async();
    AtomicReference<String> deploymentRef = new AtomicReference<>();
    vertx.deployVerticle(new AbstractVerticle() {
        @Override
        public void start() throws Exception {
            // The producer is owned by this verticle; undeploying it must close the producer.
            KafkaProducer<String, String> producer = KafkaProducer.create(vertx, config);
            producer.write(KafkaProducerRecord.create("the_topic", "the_value"), ctx.asyncAssertSuccess());
        }
    }, ctx.asyncAssertSuccess(id -> {
        deploymentRef.set(id);
        deployLatch.complete();
    }));
    deployLatch.awaitSuccess(15000);
    Async undeployLatch = ctx.async();
    // Once the message written by the verticle is observed, tear the verticle down.
    kafkaCluster.useTo().consumeStrings("the_topic", 1, 10, TimeUnit.SECONDS, () -> {
        vertx.undeploy(deploymentRef.get(), ctx.asyncAssertSuccess(v -> {
            undeployLatch.complete();
        }));
    });
    undeployLatch.awaitSuccess(10000);
    // Undeploy must have released the producer's network thread.
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Verifies that a KafkaProducer created inside a verticle is closed when the verticle is
 * undeployed: after undeploy, the producer's network thread count must return to baseline.
 *
 * Fixes: removed the AtomicReference<KafkaProducer> that was written but never read, and
 * uses the serializer class NAME for config values, consistent with the sibling tests.
 */
@Test public void testCleanupInProducer(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    Async deployLatch = ctx.async();
    AtomicReference<String> deploymentRef = new AtomicReference<>();
    vertx.deployVerticle(new AbstractVerticle() {
        @Override
        public void start() throws Exception {
            // The producer is owned by this verticle; undeploying it must close the producer.
            KafkaProducer<String, String> producer = KafkaProducer.create(vertx, config);
            producer.write(KafkaProducerRecord.create("the_topic", "the_value"), ctx.asyncAssertSuccess());
        }
    }, ctx.asyncAssertSuccess(id -> {
        deploymentRef.set(id);
        deployLatch.complete();
    }));
    deployLatch.awaitSuccess(15000);
    Async undeployLatch = ctx.async();
    // Once the message written by the verticle is observed, tear the verticle down.
    kafkaCluster.useTo().consumeStrings("the_topic", 1, 10, TimeUnit.SECONDS, () -> {
        vertx.undeploy(deploymentRef.get(), ctx.asyncAssertSuccess(v -> {
            undeployLatch.complete();
        }));
    });
    undeployLatch.awaitSuccess(10000);
    // Undeploy must have released the producer's network thread.
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Verifies that producers obtained via KafkaProducer.createShared under the same name are
 * reference counted: closing every holder releases the single underlying network thread.
 */
@Test public void testSharedProducer(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    int num = 3;
    Async sentLatch = ctx.async(num);
    LinkedList<KafkaProducer<String, String>> producers = new LinkedList<>();
    for (int i = 0; i < num; i++) {
        // Same shared name each time, so every handle wraps the same underlying producer.
        KafkaProducer<String, String> shared = KafkaProducer.createShared(vertx, "the-name", config);
        producers.add(shared);
        shared.write(KafkaProducerRecord.create("the_topic", "the_value"),
            ctx.asyncAssertSuccess(v -> sentLatch.countDown()));
    }
    sentLatch.awaitSuccess(10000);
    Async consumed = ctx.async();
    // After all writes are observed on the topic, close every shared handle.
    kafkaCluster.useTo().consumeStrings("the_topic", num, 10, TimeUnit.SECONDS,
        () -> close(ctx, producers, consumed::complete));
    consumed.awaitSuccess(10000);
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Verifies that producers obtained via KafkaProducer.createShared under the same name are
 * reference counted: closing every holder releases the single underlying network thread.
 */
@Test public void testSharedProducer(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    int num = 3;
    Async sentLatch = ctx.async(num);
    LinkedList<KafkaProducer<String, String>> producers = new LinkedList<>();
    for (int i = 0; i < num; i++) {
        // Same shared name each time, so every handle wraps the same underlying producer.
        KafkaProducer<String, String> shared = KafkaProducer.createShared(vertx, "the-name", config);
        producers.add(shared);
        shared.write(KafkaProducerRecord.create("the_topic", "the_value"),
            ctx.asyncAssertSuccess(v -> sentLatch.countDown()));
    }
    sentLatch.awaitSuccess(10000);
    Async consumed = ctx.async();
    // After all writes are observed on the topic, close every shared handle.
    kafkaCluster.useTo().consumeStrings("the_topic", num, 10, TimeUnit.SECONDS,
        () -> close(ctx, producers, consumed::complete));
    consumed.awaitSuccess(10000);
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Verifies that shared producers held by multiple instances of a deployed verticle are all
 * released when the deployment is undeployed, freeing the producer network thread.
 */
@Test public void testSharedProducerCleanupInVerticle(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    int num = 3;
    Async sentLatch = ctx.async(num);
    AtomicReference<String> deploymentID = new AtomicReference<>();
    // Raw Map cast: Properties is a Map<Object,Object>; values here are all Strings.
    DeploymentOptions options = new DeploymentOptions().setInstances(3).setConfig(new JsonObject((Map) config));
    vertx.deployVerticle(TheVerticle.class.getName(), options, ctx.asyncAssertSuccess(id -> {
        deploymentID.set(id);
        sentLatch.complete();
    }));
    sentLatch.awaitSuccess(10000);
    Async undeployed = ctx.async();
    // Once every verticle instance's message is observed, undeploy the whole deployment.
    kafkaCluster.useTo().consumeStrings("the_topic", num, 10, TimeUnit.SECONDS,
        () -> vertx.undeploy(deploymentID.get(), ctx.asyncAssertSuccess(v -> undeployed.complete())));
    undeployed.awaitSuccess(10000);
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Verifies that shared producers held by multiple instances of a deployed verticle are all
 * released when the deployment is undeployed, freeing the producer network thread.
 */
@Test public void testSharedProducerCleanupInVerticle(TestContext ctx) {
    Properties config = kafkaCluster.useTo().getProducerProperties("the_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    int num = 3;
    Async sentLatch = ctx.async(num);
    AtomicReference<String> deploymentID = new AtomicReference<>();
    // Raw Map cast: Properties is a Map<Object,Object>; values here are all Strings.
    DeploymentOptions options = new DeploymentOptions().setInstances(3).setConfig(new JsonObject((Map) config));
    vertx.deployVerticle(TheVerticle.class.getName(), options, ctx.asyncAssertSuccess(id -> {
        deploymentID.set(id);
        sentLatch.complete();
    }));
    sentLatch.awaitSuccess(10000);
    Async undeployed = ctx.async();
    // Once every verticle instance's message is observed, undeploy the whole deployment.
    kafkaCluster.useTo().consumeStrings("the_topic", num, 10, TimeUnit.SECONDS,
        () -> vertx.undeploy(deploymentID.get(), ctx.asyncAssertSuccess(v -> undeployed.complete())));
    undeployed.awaitSuccess(10000);
    waitUntil(() -> countThreads("kafka-producer-network-thread") == numKafkaProducerNetworkThread);
}
/**
 * Asynchronously consume messages on the given topic from the cluster until the expected
 * number of messages has been read or the timeout expires. Delegates to the
 * {@code BiPredicate} overload with a predicate that accepts every record.
 *
 * @param topicName the name of the topic; may not be null
 * @param count the expected number of messages to read before terminating; must be positive
 * @param timeout the maximum time that this consumer should run before terminating; must be positive
 * @param unit the unit of time for the timeout; may not be null
 * @param completion the function to call when all messages have been consumed; may be null
 */
public void consumeStrings(String topicName, int count, long timeout, TimeUnit unit, Runnable completion) {
    consumeStrings(topicName, count, timeout, unit, completion, (key, value) -> true);
}