/**
 * Returns this record's key, wrapped into the generated API type.
 *
 * @return the key (or null if no key is specified)
 */
public K key() {
  return (K) __typeArg_0.wrap(delegate.key());
}
/**
 * Returns the record's position within its Kafka partition.
 *
 * @return the position of this record in the corresponding Kafka partition.
 */
public long offset() {
  return delegate.offset();
}
/**
 * Returns this record's value, wrapped into the generated API type.
 *
 * @return the value
 */
public V value() {
  return (V) __typeArg_1.wrap(delegate.value());
}
/**
 * Converts a batch of Kafka consumer records into a single buffer containing
 * a JSON array, one JSON object per record with its topic, key, value,
 * partition and offset.
 *
 * @param records batch of records polled from Kafka
 * @return buffer holding the JSON-array encoding of the batch
 */
@Override
public Buffer toMessages(KafkaConsumerRecords<String, byte[]> records) {
  JsonArray jsonArray = new JsonArray();
  for (int i = 0; i < records.size(); i++) {
    // Look the record up once; the original called recordAt(i) five times
    // per iteration.
    KafkaConsumerRecord<String, byte[]> record = records.recordAt(i);
    JsonObject jsonObject = new JsonObject();
    jsonObject.put("topic", record.topic());
    jsonObject.put("key", record.key());
    // Decode the value explicitly as UTF-8: new String(byte[]) would use the
    // platform default charset, which is not portable across JVMs (pre-18).
    jsonObject.put("value", new String(record.value(), java.nio.charset.StandardCharsets.UTF_8));
    jsonObject.put("partition", record.partition());
    jsonObject.put("offset", record.offset());
    jsonArray.add(jsonObject);
  }
  return jsonArray.toBuffer();
}
}
// Kafka coordinates of the record being bridged.
int partition = record.partition(); long offset = record.offset();
// Delivery tag in the form "<partition>_<offset>" — presumably used to map an
// AMQP settlement back to the Kafka position; confirm against the enclosing
// method (not visible in this snippet).
String deliveryTag = partition + "_" + offset;
// Convert the Kafka record into an AMQP message addressed at the sender's source.
Message message = this.converter.toMessage(this.sender.getSource().getAddress(), record);
// Track the offset so it can later be committed; commit logic lives outside
// this snippet.
this.offsetTracker.track(partition, offset, record.record());
log.debug("Tracked {} - {} [{}]", record.topic(), record.partition(), record.offset());
/**
 * Returns the headers of this consumer record, each wrapped into the
 * RxJava-generated {@code KafkaHeader} type.
 *
 * @return the list of consumer record headers
 */
public List<io.vertx.rxjava.kafka.client.producer.KafkaHeader> headers() {
  return delegate.headers()
    .stream()
    .map(io.vertx.rxjava.kafka.client.producer.KafkaHeader::newInstance)
    .collect(java.util.stream.Collectors.toList());
}
/**
 * Returns the topic this record was consumed from.
 *
 * @return the topic this record is received from
 */
public String topic() {
  return delegate.topic();
}
/**
 * Returns the partition this record was consumed from.
 *
 * @return the partition from which this record is received
 */
public int partition() {
  return delegate.partition();
}
/**
 * Returns the CRC32 checksum of the record.
 *
 * @return the checksum (CRC32) of the record.
 * @deprecated mirrors the deprecation of the underlying delegate method
 */
@Deprecated()
public long checksum() {
  return delegate.checksum();
}
@Override public Message toMessage(String address, KafkaConsumerRecord<String, byte[]> record) { Message message = Proton.message(); message.setAddress(address); message.decode(record.value(), 0, record.value().length); // put message annotations about partition, offset and key (if not null) Map<Symbol, Object> map = new HashMap<>(); map.put(Symbol.valueOf(AmqpBridge.AMQP_PARTITION_ANNOTATION), record.partition()); map.put(Symbol.valueOf(AmqpBridge.AMQP_OFFSET_ANNOTATION), record.offset()); map.put(Symbol.valueOf(AmqpBridge.AMQP_KEY_ANNOTATION), record.key()); map.put(Symbol.valueOf(AmqpBridge.AMQP_TOPIC_ANNOTATION), record.topic()); MessageAnnotations messageAnnotations = new MessageAnnotations(map); message.setMessageAnnotations(messageAnnotations); return message; }
/**
 * Returns the headers of this consumer record, each wrapped into the
 * RxJava-generated {@code KafkaHeader} type.
 *
 * @return the list of consumer record headers
 */
public List<io.vertx.rxjava.kafka.client.producer.KafkaHeader> headers() {
  java.util.stream.Stream<io.vertx.rxjava.kafka.client.producer.KafkaHeader> wrapped =
      delegate.headers().stream().map(io.vertx.rxjava.kafka.client.producer.KafkaHeader::newInstance);
  return wrapped.collect(java.util.stream.Collectors.toList());
}
/**
 * Returns the topic this record was consumed from.
 *
 * @return the topic this record is received from
 */
public String topic() {
  return delegate.topic();
}
/**
 * Returns the partition this record was consumed from.
 *
 * @return the partition from which this record is received
 */
public int partition() {
  return delegate.partition();
}
/**
 * Returns the CRC32 checksum of the record.
 *
 * @return the checksum (CRC32) of the record.
 * @deprecated mirrors the deprecation of the underlying delegate method
 */
@Deprecated()
public long checksum() {
  return delegate.checksum();
}
@Override public Message toMessage(String address, KafkaConsumerRecord<String, byte[]> record) { Message message = Proton.message(); message.setAddress(address); // put message annotations about partition, offset and key (if not null) Map<Symbol, Object> map = new HashMap<>(); map.put(Symbol.valueOf(AmqpBridge.AMQP_PARTITION_ANNOTATION), record.partition()); map.put(Symbol.valueOf(AmqpBridge.AMQP_OFFSET_ANNOTATION), record.offset()); map.put(Symbol.valueOf(AmqpBridge.AMQP_KEY_ANNOTATION), record.key()); map.put(Symbol.valueOf(AmqpBridge.AMQP_TOPIC_ANNOTATION), record.topic()); MessageAnnotations messageAnnotations = new MessageAnnotations(map); message.setMessageAnnotations(messageAnnotations); message.setBody(new Data(new Binary(record.value()))); return message; }
/**
 * Returns this record's key, wrapped into the generated API type.
 *
 * @return the key (or null if no key is specified)
 */
public K key() {
  return (K) __typeArg_0.wrap(delegate.key());
}
/**
 * Returns the record's position within its Kafka partition.
 *
 * @return the position of this record in the corresponding Kafka partition.
 */
public long offset() {
  return delegate.offset();
}
/**
 * Returns this record's value, wrapped into the generated API type.
 *
 * @return the value
 */
public V value() {
  return (V) __typeArg_1.wrap(delegate.value());
}
/**
 * Consumes {@code numMessages} records from a topic whose records each carry
 * exactly one header, and asserts that every record exposes its header
 * key/value pair in order. Completes the async once all records are seen.
 *
 * @param ctx test context used for assertions and async completion
 */
@Test
public void testConsumerWithHeader(TestContext ctx) {
  int numMessages = 1000;
  String topicName = "testConsumerWithHeader";
  Properties config = setupConsumeWithHeaders(ctx, numMessages, topicName);
  consumer = createConsumer(vertx, config);
  // Renamed from "consumer": the original local shadowed the field of the
  // same name assigned just above, which was easy to misread.
  KafkaConsumer<String, String> wrappedConsumer = new KafkaConsumerImpl<>(this.consumer);
  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  AtomicInteger headerIndex = new AtomicInteger();
  wrappedConsumer.exceptionHandler(ctx::fail);
  wrappedConsumer.handler(rec -> {
    // Each record is expected to carry exactly one header, produced with
    // sequential "header_key<i>" / "header_value<i>" pairs.
    List<KafkaHeader> headers = rec.headers();
    ctx.assertEquals(1, headers.size());
    KafkaHeader header = headers.get(0);
    ctx.assertEquals("header_key" + headerIndex.get(), header.key());
    ctx.assertEquals("header_value" + headerIndex.getAndIncrement(), header.value().toString());
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  wrappedConsumer.subscribe(Collections.singleton(topicName));
}