@Override
public void onCompletion(final RecordMetadata metadata, final Exception e) {
    final String keyString = Objects.toString(key);
    final String valueString = Objects.toString(value);
    if (e != null) {
        System.err.println("Error when sending message to topic: '" + topic
                + "', with key: '" + keyString + "', and value: '" + valueString + "'");
        e.printStackTrace(System.err);
    } else {
        System.out.println(keyString + " --> (" + valueString + ") ts:" + metadata.timestamp());
    }
}
}
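For context, a callback like the one above is passed as the second argument to KafkaProducer.send(). A minimal sketch, assuming a configured KafkaProducer named producer and topic/key/value variables in scope (since Callback is a functional interface, a lambda works too):

// A sketch of registering such a callback; producer, topic, key, and
// value are assumed to exist in the enclosing scope.
producer.send(new ProducerRecord<>(topic, key, value), (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace(System.err);
    } else {
        System.out.println("offset=" + metadata.offset() + " ts=" + metadata.timestamp());
    }
});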
@Test
public void testBatchCannotCompleteTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    batch.done(500L, 10L, null);

    assertEquals(1, callback.invocations);
    assertNull(callback.exception);
    assertNotNull(callback.metadata);

    try {
        batch.done(1000L, 20L, null);
        fail("Expected exception from done");
    } catch (IllegalStateException e) {
        // expected
    }

    RecordMetadata recordMetadata = future.get();
    assertEquals(500L, recordMetadata.offset());
    assertEquals(10L, recordMetadata.timestamp());
}
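The test above relies on a MockCallback helper that is not shown. Inferred from the fields the assertions read (invocations, exception, metadata), a plausible sketch of such a helper might look like this; it is an assumption, not the actual Kafka test fixture:

// Hypothetical reconstruction of the MockCallback helper, inferred
// from the fields the test asserts on.
private static class MockCallback implements Callback {
    int invocations = 0;
    RecordMetadata metadata;
    Exception exception;

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        invocations++;
        this.metadata = metadata;
        this.exception = exception;
    }
}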
@Test @SuppressWarnings("deprecation") public void testConstructionWithRelativeOffset() { TopicPartition tp = new TopicPartition("foo", 0); long timestamp = 2340234L; int keySize = 3; int valueSize = 5; long baseOffset = 15L; long relativeOffset = 3L; Long checksum = 908923L; RecordMetadata metadata = new RecordMetadata(tp, baseOffset, relativeOffset, timestamp, checksum, keySize, valueSize); assertEquals(tp.topic(), metadata.topic()); assertEquals(tp.partition(), metadata.partition()); assertEquals(timestamp, metadata.timestamp()); assertEquals(baseOffset + relativeOffset, metadata.offset()); assertEquals(checksum.longValue(), metadata.checksum()); assertEquals(keySize, metadata.serializedKeySize()); assertEquals(valueSize, metadata.serializedValueSize()); }
@Test @SuppressWarnings("deprecation") public void testConstructionWithMissingRelativeOffset() { TopicPartition tp = new TopicPartition("foo", 0); long timestamp = 2340234L; int keySize = 3; int valueSize = 5; Long checksum = 908923L; RecordMetadata metadata = new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize); assertEquals(tp.topic(), metadata.topic()); assertEquals(tp.partition(), metadata.partition()); assertEquals(timestamp, metadata.timestamp()); assertFalse(metadata.hasOffset()); assertEquals(-1L, metadata.offset()); assertEquals(checksum.longValue(), metadata.checksum()); assertEquals(keySize, metadata.serializedKeySize()); assertEquals(valueSize, metadata.serializedValueSize()); }
/**
 * Render a view of the {@link RecordMetadata} that resulted from writing
 * messages to Kafka.
 *
 * @param records The record metadata.
 * @param properties The properties.
 * @return either a list of maps describing each written record, or a simple count of records.
 */
private Object render(List<RecordMetadata> records, Properties properties) {
    Object view;
    if (MESSAGE_VIEW_RICH.equals(getMessageView(properties))) {
        // build a 'rich' view of the messages that were written
        List<Object> responses = new ArrayList<>();
        for (RecordMetadata record : records) {
            // render the 'rich' view of the record
            Map<String, Object> richView = new HashMap<>();
            richView.put("topic", record.topic());
            richView.put("partition", record.partition());
            richView.put("offset", record.offset());
            richView.put("timestamp", record.timestamp());
            responses.add(richView);
        }
        // the rich view is a list of maps containing metadata about how each message was written
        view = responses;
    } else {
        // otherwise, the view is simply a count of the number of messages written
        view = CollectionUtils.size(records);
    }
    return view;
}
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
    // executes every time a record is successfully sent or an exception is thrown
    if (e == null) {
        // the record was successfully sent
        logger.info("Received new metadata. \n"
                + "Topic: " + recordMetadata.topic() + "\n"
                + "Partition: " + recordMetadata.partition() + "\n"
                + "Offset: " + recordMetadata.offset() + "\n"
                + "Timestamp: " + recordMetadata.timestamp());
    } else {
        logger.error("Error while producing", e);
    }
}
});
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
    // executes every time a record is successfully sent or an exception is thrown
    if (e == null) {
        // the record was successfully sent
        logger.info("Received new metadata. \n"
                + "Topic: " + recordMetadata.topic() + "\n"
                + "Partition: " + recordMetadata.partition() + "\n"
                + "Offset: " + recordMetadata.offset() + "\n"
                + "Timestamp: " + recordMetadata.timestamp());
    } else {
        logger.error("Error while producing", e);
    }
}
}).get(); // block the .send() to make it synchronous - don't do this in production!
/**
 * Emit messages to the relevant Kafka topics.
 *
 * @return true if the messages were successfully delivered to the Kafka topics; false otherwise
 */
public Boolean emit() {
    try {
        final List<Future<RecordMetadata>> results = new ArrayList<>();
        if (async) {
            results.add(producer.send(
                    new ProducerRecord<>(TOPIC_CACHE, identifier.getIRIString(), serialize(dataset))));
        }
        // Update the containment triples of the parent resource if this is a delete or create operation
        parent.ifPresent(emitToParent(identifier, dataset, results));
        for (final Future<RecordMetadata> result : results) {
            final RecordMetadata res = result.get();
            LOGGER.debug("Sent record to topic: {}, {}", res, res.timestamp());
        }
        return true;
    } catch (final InterruptedException | ExecutionException ex) {
        LOGGER.error("Error sending record to Kafka topic: {}", ex.getMessage());
        return false;
    }
}
public void sendMessages(String topic, int count, CountDownLatch latch) throws InterruptedException {
    sender.<Integer>send(Flux.range(1, count)
            .map(i -> SenderRecord.create(new ProducerRecord<>(topic, i, "Message_" + i), i)))
        .doOnError(e -> log.error("Send failed", e))
        .subscribe(r -> {
            RecordMetadata metadata = r.recordMetadata();
            System.out.printf("Message %d sent successfully, topic-partition=%s-%d offset=%d timestamp=%s\n",
                    r.correlationMetadata(),
                    metadata.topic(),
                    metadata.partition(),
                    metadata.offset(),
                    dateFormat.format(new Date(metadata.timestamp())));
            latch.countDown();
        });
}
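The sender field above is a Reactor Kafka KafkaSender, which is not shown being constructed. A minimal sketch of how it might be created; the bootstrap server address and serializer choices are assumptions based on the Integer keys and String values used above:

// Hypothetical construction of the sender field; broker address is assumed.
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
SenderOptions<Integer, String> senderOptions = SenderOptions.create(props);
KafkaSender<Integer, String> sender = KafkaSender.create(senderOptions);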
@Override
public void onSuccess(SendResult<String, String> sendResult) {
    ProducerRecord<String, String> producerRecord = sendResult.getProducerRecord();
    RecordMetadata recordMetadata = sendResult.getRecordMetadata();
    log.info("onSuccess(), offset {} partition {} timestamp {} for '{}'=='{}'",
            recordMetadata.offset(), recordMetadata.partition(), recordMetadata.timestamp(),
            producerRecord.key(), producerRecord.value());
}
public static RecordMetadata from(org.apache.kafka.clients.producer.RecordMetadata metadata) {
    // note: RecordMetadata#checksum() is deprecated in recent Kafka client versions
    return new RecordMetadata(metadata.checksum(), metadata.offset(), metadata.partition(),
            metadata.timestamp(), metadata.topic());
}
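The factory above adapts Kafka's RecordMetadata into a local wrapper class that is not shown. Inferred from the argument order, its shape might be as follows; this is a sketch, not the actual class:

// Hypothetical shape of the wrapper the factory above constructs,
// inferred from the argument order (checksum, offset, partition, timestamp, topic).
public class RecordMetadata {
    private final long checksum;
    private final long offset;
    private final int partition;
    private final long timestamp;
    private final String topic;

    public RecordMetadata(long checksum, long offset, int partition, long timestamp, String topic) {
        this.checksum = checksum;
        this.offset = offset;
        this.partition = partition;
        this.timestamp = timestamp;
        this.topic = topic;
    }
}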