@Override public void start() throws Exception { systemMBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class); // A random identifier String pid = UUID.randomUUID().toString(); // Get the kafka producer config JsonObject config = config(); // Create the producer producer = KafkaWriteStream.create(vertx, config.getMap(), String.class, JsonObject.class); // Publish the metircs in Kafka vertx.setPeriodic(1000, id -> { JsonObject metrics = new JsonObject(); metrics.put("CPU", systemMBean.getProcessCpuLoad()); metrics.put("Mem", systemMBean.getTotalPhysicalMemorySize() - systemMBean.getFreePhysicalMemorySize()); producer.write(new ProducerRecord<>("the_topic", new JsonObject().put(pid, metrics))); }); }
@Override
public void stop() throws Exception {
  // Release the Kafka producer, if start() managed to create one
  if (producer != null) {
    producer.close();
  }
}
}
/**
 * Wraps an existing Kafka {@code Producer} in a Vert.x write stream.
 */
static <K, V> KafkaWriteStream<K, V> producer(Vertx vertx, Producer<K, V> producer) {
  KafkaWriteStream<K, V> stream = KafkaWriteStream.create(vertx, producer);
  return stream;
}
}
@Test
public void testProducerError(TestContext ctx) throws Exception {
  TestProducer mock = new TestProducer();
  KafkaWriteStream<String, String> producer = ProducerTest.producer(Vertx.vertx(), mock);

  // Queue one record so there is an in-flight write that can fail
  producer.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));

  RuntimeException cause = new RuntimeException();
  Async async = ctx.async();
  producer.exceptionHandler(err -> {
    // The stream must surface exactly the failure injected below
    ctx.assertEquals(cause, err);
    async.complete();
  });

  // Fail the pending send; the exception handler is expected to fire
  mock.assertErrorNext(cause);
}
private void testProducerDrain(TestContext ctx, RuntimeException failure) throws Exception {
  TestProducer mock = new TestProducer();
  KafkaWriteStream<String, String> producer = ProducerTest.producer(Vertx.vertx(), mock);

  // Fill the write queue until back-pressure kicks in
  int sent = 0;
  while (!producer.writeQueueFull()) {
    producer.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));
    sent++;
  }

  Async async = ctx.async();
  producer.drainHandler(v -> {
    // The drain notification must arrive on a Vert.x event-loop thread
    ctx.assertTrue(Context.isOnVertxThread());
    ctx.assertTrue(Context.isOnEventLoopThread());
    async.complete();
  });

  // Acknowledge (or fail) half of the queued records plus one extra, which
  // drops the queue below the drain threshold and triggers the drain handler
  for (int i = 0; i <= sent / 2; i++) {
    if (failure != null) {
      mock.assertErrorNext(failure);
    } else {
      mock.assertCompleteNext();
    }
  }

  assertFalse(producer.writeQueueFull());
}
@Override
@SuppressWarnings("unchecked")
public KafkaProducer<K, V> write(KafkaProducerRecord<K, V> record, Handler<AsyncResult<RecordMetadata>> handler) {
  this.stream.write(record.record(), done -> {
    if (handler == null) {
      // Fire-and-forget write: nobody to notify
      return;
    }
    // Translate the raw Kafka metadata (or failure) into the Vert.x result type
    if (done.succeeded()) {
      handler.handle(Future.succeededFuture(Helper.from(done.result())));
    } else {
      handler.handle(Future.failedFuture(done.cause()));
    }
  });
  return this;
}
@Override
public KafkaProducer<K, V> exceptionHandler(Handler<Throwable> handler) {
  // Delegate the registration to the underlying write stream
  stream.exceptionHandler(handler);
  return this;
}
@Override
public KafkaProducer<K, V> drainHandler(Handler<Void> handler) {
  // Delegate the registration to the underlying write stream
  stream.drainHandler(handler);
  return this;
}
@Override
public boolean writeQueueFull() {
  // Back-pressure state is owned by the underlying write stream
  return stream.writeQueueFull();
}
@Override
public void end() {
  // Terminate the underlying write stream
  stream.end();
}
public static <K, V> KafkaProducer<K, V> createShared(Vertx vertx, String name, Map<String, String> config) {
  return createShared(vertx, name, () -> {
    // Copy inside the supplier so each stream creation snapshots the map
    Map<String, String> snapshot = new HashMap<>(config);
    return KafkaWriteStream.create(vertx, snapshot);
  });
}
@Test
public void testProducerError(TestContext ctx) throws Exception {
  TestProducer mock = new TestProducer();
  KafkaWriteStream<String, String> producer = ProducerTest.producer(Vertx.vertx(), mock);

  // One pending write gives the mock something to fail
  producer.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));

  RuntimeException cause = new RuntimeException();
  Async async = ctx.async();
  producer.exceptionHandler(err -> {
    // Exactly the injected failure must be reported
    ctx.assertEquals(cause, err);
    async.complete();
  });

  // Inject the failure on the pending send
  mock.assertErrorNext(cause);
}
private void testProducerDrain(TestContext ctx, RuntimeException failure) throws Exception {
  TestProducer mock = new TestProducer();
  KafkaWriteStream<String, String> producer = ProducerTest.producer(Vertx.vertx(), mock);

  // Write until the stream signals back-pressure
  int sent = 0;
  while (!producer.writeQueueFull()) {
    producer.write(new ProducerRecord<>("the_topic", 0, 0L, "abc", "def"));
    sent++;
  }

  Async async = ctx.async();
  producer.drainHandler(v -> {
    // Drain callbacks must run on a Vert.x event-loop thread
    ctx.assertTrue(Context.isOnVertxThread());
    ctx.assertTrue(Context.isOnEventLoopThread());
    async.complete();
  });

  // Completing (or failing) half of the writes plus one pushes the queue
  // below the drain watermark, which should fire the drain handler
  for (int i = 0; i <= sent / 2; i++) {
    if (failure != null) {
      mock.assertErrorNext(failure);
    } else {
      mock.assertCompleteNext();
    }
  }

  assertFalse(producer.writeQueueFull());
}
@Test // Should fail because it cannot reach the broker public void testBrokerConnectionError(TestContext ctx) throws Exception { Properties props = new Properties(); // use a wrong port on purpose, because Broker IS running props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091"); props.setProperty(ProducerConfig.ACKS_CONFIG, "1"); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); producer = producer(Vertx.vertx(), props); producer.write(new ProducerRecord<>("testBrokerConnectionError", 0, "key", "value"), ctx.asyncAssertFailure()); }
@Test
public void testProducerProduce(TestContext ctx) throws Exception {
  String topicName = "testProducerProduce";
  Properties config = kafkaCluster.useTo().getProducerProperties("testProducerProduce_producer");
  config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), config);
  producer.exceptionHandler(ctx::fail);

  // Wrap the raw stream in the KafkaProducer API. Named kafkaProducer (not
  // "producer") so it does not shadow the field assigned two lines above.
  KafkaProducer<String, String> kafkaProducer = new KafkaProducerImpl<>(producer);
  int numMessages = 100000;
  for (int i = 0; i < numMessages; i++) {
    kafkaProducer.write(KafkaProducerRecord.create(topicName, "key-" + i, "value-" + i, 0)
      .addHeader("header_key", "header_value-" + i));
  }
  assertReceiveMessages(ctx, topicName, numMessages);
}
@Override
@SuppressWarnings("unchecked")
public void end(KafkaProducerRecord<K, V> kafkaProducerRecord) {
  // Write the final record and terminate the underlying stream
  stream.end(kafkaProducerRecord.record());
}
/**
 * Variant of {@code createShared} taking a {@code Properties} configuration
 * plus explicit key/value classes for the serializers.
 */
public static <K, V> KafkaProducer<K, V> createShared(Vertx vertx, String name, Properties config, Class<K> keyType, Class<V> valueType) {
  return createShared(vertx, name, () -> KafkaWriteStream.create(vertx, config, keyType, valueType));
}
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
  String topicName = "testStreamProduce";
  Properties config = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
  config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), config);
  producer.exceptionHandler(ctx::fail);

  // Write numMessages records to partition 0, each carrying one header
  int numMessages = 100000;
  for (int count = 0; count < numMessages; count++) {
    ProducerRecord<String, String> record =
      new ProducerRecord<>(topicName, 0, "key-" + count, "value-" + count);
    record.headers().add("header_key", ("header_value-" + count).getBytes());
    producer.write(record);
  }

  assertReceiveMessages(ctx, topicName, numMessages);
}