@Override
public void start() throws Exception {
  // Platform MBean exposing OS-level figures (process CPU load, physical memory sizes)
  systemMBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);

  // A random identifier — used as the JSON key under which this instance publishes its metrics
  String pid = UUID.randomUUID().toString();

  // Get the kafka producer config (the verticle's deployment configuration)
  JsonObject config = config();

  // Create the producer (String keys, JsonObject values)
  producer = KafkaWriteStream.create(vertx, config.getMap(), String.class, JsonObject.class);

  // Publish the metrics in Kafka once per second
  vertx.setPeriodic(1000, id -> {
    JsonObject metrics = new JsonObject();
    metrics.put("CPU", systemMBean.getProcessCpuLoad());
    // Used memory approximated as total physical memory minus free physical memory
    metrics.put("Mem", systemMBean.getTotalPhysicalMemorySize() - systemMBean.getFreePhysicalMemorySize());
    producer.write(new ProducerRecord<>("the_topic", new JsonObject().put(pid, metrics)));
  });
}
/**
 * Fills the producer's write queue to the back-pressure limit, then acknowledges
 * (or fails, when {@code failure} is non-null) enough pending sends to trigger the
 * drain handler, asserting it runs on a Vert.x event-loop thread.
 *
 * @param ctx     the test context used for async assertions
 * @param failure when non-null, each pending send is completed with this error
 *                instead of a success acknowledgement
 */
private void testProducerDrain(TestContext ctx, RuntimeException failure) throws Exception {
  TestProducer mock = new TestProducer();
  KafkaWriteStream<String, String> producer = ProducerTest.producer(Vertx.vertx(), mock);

  // Saturate the write queue so the stream reports back-pressure.
  int sent = 0;
  while (!producer.writeQueueFull()) {
    producer.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));
    sent++;
  }

  Async async = ctx.async();
  producer.drainHandler(v -> {
    // The drain notification must be delivered on a Vert.x event-loop thread.
    ctx.assertTrue(Context.isOnVertxThread());
    ctx.assertTrue(Context.isOnEventLoopThread());
    async.complete();
  });

  // Complete half of the pending sends plus one, dropping the queue below the
  // drain watermark. (The original code duplicated the loop body once after the
  // loop; folding it into the bound removes the duplication with an identical trace.)
  for (int i = 0; i < sent / 2 + 1; i++) {
    if (failure != null) {
      mock.assertErrorNext(failure);
    } else {
      mock.assertCompleteNext();
    }
  }

  assertFalse(producer.writeQueueFull());
}
/**
 * Writes the record to the underlying stream and, when a handler is supplied,
 * reports the mapped {@link RecordMetadata} on success or the failure cause otherwise.
 */
@Override
@SuppressWarnings("unchecked")
public KafkaProducer<K, V> write(KafkaProducerRecord<K, V> record, Handler<AsyncResult<RecordMetadata>> handler) {
  this.stream.write(record.record(), done -> {
    // Completion is only propagated when the caller asked for it.
    if (handler == null) {
      return;
    }
    handler.handle(done.succeeded()
      ? Future.succeededFuture(Helper.from(done.result()))
      : Future.failedFuture(done.cause()));
  });
  return this;
}
/**
 * Saturates the write queue, then acknowledges (or fails) enough pending sends
 * to fire the drain handler, asserting the handler runs on an event-loop thread.
 */
private void testProducerDrain(TestContext ctx, RuntimeException failure) throws Exception {
  TestProducer testProducer = new TestProducer();
  KafkaWriteStream<String, String> stream = ProducerTest.producer(Vertx.vertx(), testProducer);

  // Keep writing until the stream reports back-pressure.
  int queued = 0;
  for (; !stream.writeQueueFull(); queued++) {
    stream.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));
  }

  Async drained = ctx.async();
  stream.drainHandler(v -> {
    // Drain notification must arrive on a Vert.x event-loop thread.
    ctx.assertTrue(Context.isOnVertxThread());
    ctx.assertTrue(Context.isOnEventLoopThread());
    drained.complete();
  });

  // Acknowledge half the queued records, then one extra, so the queue drops
  // below the drain watermark and the handler fires.
  int acknowledgements = queued / 2 + 1;
  for (int i = 0; i < acknowledgements; i++) {
    if (failure == null) {
      testProducer.assertCompleteNext();
    } else {
      testProducer.assertErrorNext(failure);
    }
  }

  assertFalse(stream.writeQueueFull());
}
@Override
public void start() throws Exception {
  // Platform MBean exposing OS-level figures (process CPU load, physical memory sizes)
  systemMBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);

  // A random identifier — used as the JSON key under which this instance publishes its metrics
  String pid = UUID.randomUUID().toString();

  // Get the kafka producer config (the verticle's deployment configuration)
  JsonObject config = config();

  // Create the producer — getDelegate() presumably unwraps this wrapper to the
  // core Vert.x instance KafkaWriteStream.create expects (TODO confirm wrapper type)
  producer = KafkaWriteStream.create(vertx.getDelegate(), config.getMap(), String.class, JsonObject.class);

  // Publish the metrics in Kafka once per second
  vertx.setPeriodic(1000, id -> {
    JsonObject metrics = new JsonObject();
    metrics.put("CPU", systemMBean.getProcessCpuLoad());
    // Used memory approximated as total physical memory minus free physical memory
    metrics.put("Mem", systemMBean.getTotalPhysicalMemorySize() - systemMBean.getFreePhysicalMemorySize());
    producer.write(new ProducerRecord<>("the_topic", new JsonObject().put(pid, metrics)));
  });
}
@Test // Should fail because it cannot reach the broker public void testBrokerConnectionError(TestContext ctx) throws Exception { Properties props = new Properties(); // use a wrong port on purpose, because Broker IS running props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091"); props.setProperty(ProducerConfig.ACKS_CONFIG, "1"); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); producer = producer(Vertx.vertx(), props); producer.write(new ProducerRecord<>("testBrokerConnectionError", 0, "key", "value"), ctx.asyncAssertFailure()); }
@Test // Should fail because it cannot reach the broker public void testBrokerConnectionError(TestContext ctx) throws Exception { Properties props = new Properties(); // use a wrong port on purpose, because Broker IS running props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091"); props.setProperty(ProducerConfig.ACKS_CONFIG, "1"); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); producer = producer(Vertx.vertx(), props); producer.write(new ProducerRecord<>("testBrokerConnectionError", 0, "key", "value"), ctx.asyncAssertFailure()); }
// Write a keyed record for iteration i to partition 0 with a fixed 0L timestamp.
// NOTE(review): `topic`, `i`, and `producer` come from an enclosing scope not
// visible in this chunk — confirm against the full method before relying on this.
producer.write(new ProducerRecord<>(topic, 0, 0L, "the_key_" + i, "the_value_" + i));
producer.write(new ProducerRecord<>(topic, 0, 0L, "the_key_" + i, "the_value_" + i));
@Test public void testBlockingBroker(TestContext ctx) throws Exception { // Use a port different from default 9092, because Broker IS running int port = 9091; Async serverAsync = ctx.async(); NetServer server = vertx.createNetServer().connectHandler(so -> { }).listen(port, ctx.asyncAssertSuccess(v -> serverAsync.complete())); serverAsync.awaitSuccess(10000); Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port); props.setProperty(ProducerConfig.ACKS_CONFIG, "1"); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); producer = producer(Vertx.vertx(), props); producer.write(new ProducerRecord<>("testBlockingBroker", 0, "key", "value"), ctx.asyncAssertFailure()); }
@Test public void testBlockingBroker(TestContext ctx) throws Exception { // Use a port different from default 9092, because Broker IS running int port = 9091; Async serverAsync = ctx.async(); NetServer server = vertx.createNetServer().connectHandler(so -> { }).listen(port, ctx.asyncAssertSuccess(v -> serverAsync.complete())); serverAsync.awaitSuccess(10000); Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port); props.setProperty(ProducerConfig.ACKS_CONFIG, "1"); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); producer = producer(Vertx.vertx(), props); producer.write(new ProducerRecord<>("testBlockingBroker", 0, "key", "value"), ctx.asyncAssertFailure()); }
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
  String topicName = "testStreamProduce";

  // Producer against the embedded cluster with string key/value serialization.
  Properties config = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
  config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), config);
  producer.exceptionHandler(ctx::fail);

  // Fire a large batch of records, each carrying a per-record header.
  int numMessages = 100000;
  for (int i = 0; i < numMessages; i++) {
    ProducerRecord<String, String> rec = new ProducerRecord<>(topicName, 0, "key-" + i, "value-" + i);
    rec.headers().add("header_key", ("header_value-" + i).getBytes());
    producer.write(rec);
  }

  // Verify every record arrives on the topic.
  assertReceiveMessages(ctx, topicName, numMessages);
}
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
  String topicName = "testStreamProduce";

  // Configure a string/string producer for the embedded cluster.
  Properties producerConfig = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
  producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), producerConfig);
  producer.exceptionHandler(ctx::fail);

  // Push a high-volume batch through the write stream; each record gets a header.
  int total = 100000;
  int index = 0;
  while (index < total) {
    ProducerRecord<String, String> outgoing =
        new ProducerRecord<>(topicName, 0, "key-" + index, "value-" + index);
    outgoing.headers().add("header_key", ("header_value-" + index).getBytes());
    producer.write(outgoing);
    index++;
  }

  // Consume and count everything back to confirm delivery.
  assertReceiveMessages(ctx, topicName, total);
}
@Test
public void testProducerError(TestContext ctx) throws Exception {
  TestProducer testProducer = new TestProducer();
  KafkaWriteStream<String, String> stream = ProducerTest.producer(Vertx.vertx(), testProducer);

  // Queue one record so there is a pending send to fail.
  stream.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));

  // Failing the pending send must surface the exact cause on the exception handler.
  RuntimeException cause = new RuntimeException();
  Async async = ctx.async();
  stream.exceptionHandler(err -> {
    ctx.assertEquals(cause, err);
    async.complete();
  });
  testProducer.assertErrorNext(cause);
}
@Test
public void testProducerError(TestContext ctx) throws Exception {
  TestProducer mockProducer = new TestProducer();
  KafkaWriteStream<String, String> writeStream = ProducerTest.producer(Vertx.vertx(), mockProducer);

  // One in-flight record gives the mock something to fail.
  writeStream.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));

  RuntimeException expected = new RuntimeException();
  Async done = ctx.async();
  // The injected error must be delivered, unchanged, to the exception handler.
  writeStream.exceptionHandler(err -> {
    ctx.assertEquals(expected, err);
    done.complete();
  });
  mockProducer.assertErrorNext(expected);
}
// Record the key/value locally for later verification, then write the record
// to partition 0 of the prefixed topic.
// NOTE(review): `keys`, `values`, `key`, `value`, `prefix`, `topic`, and
// `writeStream` come from an enclosing scope not visible in this chunk —
// confirm against the full method before relying on this.
keys.add(key); values.add(value); writeStream.write(new ProducerRecord<>(prefix + topic, 0, key, value));
keys.add(key); values.add(value); writeStream.write(new ProducerRecord<>(prefix + topic, 0, key, value));