@Test @SuppressWarnings("deprecation") public void testConstructionWithMissingRelativeOffset() { TopicPartition tp = new TopicPartition("foo", 0); long timestamp = 2340234L; int keySize = 3; int valueSize = 5; Long checksum = 908923L; RecordMetadata metadata = new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize); assertEquals(tp.topic(), metadata.topic()); assertEquals(tp.partition(), metadata.partition()); assertEquals(timestamp, metadata.timestamp()); assertFalse(metadata.hasOffset()); assertEquals(-1L, metadata.offset()); assertEquals(checksum.longValue(), metadata.checksum()); assertEquals(keySize, metadata.serializedKeySize()); assertEquals(valueSize, metadata.serializedValueSize()); }
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        logger.debug("Error sending message to Kafka {}", exception.getMessage());
    }
    if (logger.isDebugEnabled()) {
        long eventElapsedTime = System.currentTimeMillis() - startTime;
        if (metadata != null) {
            logger.debug("Acked message partition:{} offset:{}", metadata.partition(), metadata.offset());
        }
        logger.debug("Elapsed time for send: {}", eventElapsedTime);
    }
}
}
@Override
public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
    onAckCount++;
    if (exception != null) {
        onErrorAckCount++;
        // The length check is just a way to invoke topic() and let it throw
        // if the RecordMetadata's TopicPartition is null.
        if (metadata != null && metadata.topic().length() >= 0) {
            onErrorAckWithTopicSetCount++;
            if (metadata.partition() >= 0)
                onErrorAckWithTopicPartitionSetCount++;
        }
    }
    if (throwExceptionOnAck)
        throw new KafkaException("Injected exception in AppendProducerInterceptor.onAcknowledgement");
}
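onAcknowledgement above comes from Kafka's ProducerInterceptor interface. As a minimal sketch of how such an interceptor gets wired in, assuming AppendProducerInterceptor implements ProducerInterceptor and using a placeholder broker address:

// A sketch, not the project's actual setup: interceptors are registered by
// class name through the producer configuration.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, AppendProducerInterceptor.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(props,
        new StringSerializer(), new StringSerializer());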
/**
 * Render a view of the {@link RecordMetadata} that resulted from writing
 * messages to Kafka.
 *
 * @param records    the record metadata
 * @param properties the properties
 * @return the rendered view; either a list of per-record metadata maps or a count of records written
 */
private Object render(List<RecordMetadata> records, Properties properties) {
    Object view;
    if (MESSAGE_VIEW_RICH.equals(getMessageView(properties))) {
        // build a 'rich' view of the messages that were written
        List<Object> responses = new ArrayList<>();
        for (RecordMetadata record : records) {
            // render the 'rich' view of the record
            Map<String, Object> richView = new HashMap<>();
            richView.put("topic", record.topic());
            richView.put("partition", record.partition());
            richView.put("offset", record.offset());
            richView.put("timestamp", record.timestamp());
            responses.add(richView);
        }
        // the rich view is a list of maps containing metadata about how each message was written
        view = responses;
    } else {
        // otherwise, the view is simply a count of the number of messages written
        view = CollectionUtils.size(records);
    }
    return view;
}
public void run() {
    try {
        long nextIndex = _nextIndexPerPartition.get(_partition).get();
        long currMs = System.currentTimeMillis();
        String message = Utils.jsonFromFields(_topic, nextIndex, currMs, _producerId, _recordSize);
        BaseProducerRecord record = new BaseProducerRecord(_topic, _partition, _key, message);
        RecordMetadata metadata = _producer.send(record, _sync);
        _sensors._produceDelay.record(System.currentTimeMillis() - currMs);
        _sensors._recordsProduced.record();
        _sensors._recordsProducedPerPartition.get(_partition).record();
        if (nextIndex == -1 && _sync) {
            nextIndex = metadata.offset();
        } else {
            nextIndex = nextIndex + 1;
        }
        _nextIndexPerPartition.get(_partition).set(nextIndex);
    } catch (Exception e) {
        _sensors._produceError.record();
        _sensors._produceErrorPerPartition.get(_partition).record();
        LOG.warn(_name + " failed to send message", e);
    }
}
}
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null) {
        log.warn("Failed to produce metrics message", exception);
    } else {
        log.debug("Produced metrics message of size {} with offset {} to topic partition {}-{}",
                metadata.serializedValueSize(), metadata.offset(), metadata.topic(), metadata.partition());
    }
}
});
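Callbacks like the one above are passed as the second argument to KafkaProducer.send. A minimal sketch, assuming a configured KafkaProducer<String, String> named producer and a hypothetical topic name:

// Callback is a functional interface, so a lambda works; it fires once the
// broker acknowledges (or rejects) the record.
producer.send(new ProducerRecord<>("example-topic", "key", "value"), (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace();
    } else {
        System.out.printf("acked %s-%d at offset %d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
    }
});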
@Test
public void testAutoCompleteMock() throws Exception {
    buildMockProducer(true);
    Future<RecordMetadata> metadata = producer.send(record1);
    assertTrue("Send should be immediately complete", metadata.isDone());
    assertFalse("Send should be successful", isError(metadata));
    assertEquals("Offset should be 0", 0L, metadata.get().offset());
    assertEquals(topic, metadata.get().topic());
    assertEquals("We should have the record in our history", singletonList(record1), producer.history());
    producer.clear();
    assertEquals("Clear should erase our history", 0, producer.history().size());
}
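The buildMockProducer(true) helper is not shown in this snippet; a plausible sketch, assuming it assigns the test's producer field using the auto-complete constructor that kafka-clients' MockProducer exposes:

// Hypothetical helper for illustration: with autoComplete=true, MockProducer
// completes each send() future immediately and successfully.
private void buildMockProducer(boolean autoComplete) {
    this.producer = new MockProducer<>(autoComplete, new StringSerializer(), new StringSerializer());
}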
@Override
public void onSuccess(SendResult<String, String> sendResult) {
    ProducerRecord<String, String> producerRecord = sendResult.getProducerRecord();
    RecordMetadata recordMetadata = sendResult.getRecordMetadata();
    log.info("onSuccess(), offset {} partition {} timestamp {} for '{}'=='{}'",
            recordMetadata.offset(), recordMetadata.partition(), recordMetadata.timestamp(),
            producerRecord.key(), producerRecord.value());
}
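onSuccess above is one half of a Spring ListenableFutureCallback. A minimal sketch of how such a callback is typically registered, assuming the ListenableFuture-based KafkaTemplate API this snippet uses, a logger named log, and a hypothetical topic name:

ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send("example-topic", "key", "value");
future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
    @Override
    public void onSuccess(SendResult<String, String> result) {
        RecordMetadata md = result.getRecordMetadata();
        log.info("sent to {}-{} at offset {}", md.topic(), md.partition(), md.offset());
    }

    @Override
    public void onFailure(Throwable ex) {
        log.error("send failed", ex);
    }
});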
private Callback setProducerCallback() {
    return (recordMetadata, e) -> {
        if (e != null) {
            log.debug("Event send failed [{}]", e.getMessage());
            if (recordMetadata.topic().equals(config.getMetricTopic())) {
                metricLost.incrementAndGet();
            } else if (recordMetadata.topic().equals(config.getAlertTopic())) {
                alertLost.incrementAndGet();
            } else {
                invalidLost.incrementAndGet();
            }
        }
    };
}
@Test
public void testBatchCannotCompleteTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    batch.done(500L, 10L, null);
    assertEquals(1, callback.invocations);
    assertNull(callback.exception);
    assertNotNull(callback.metadata);
    try {
        batch.done(1000L, 20L, null);
        fail("Expected exception from done");
    } catch (IllegalStateException e) {
        // expected
    }
    RecordMetadata recordMetadata = future.get();
    assertEquals(500L, recordMetadata.offset());
    assertEquals(10L, recordMetadata.timestamp());
}
@Test
public void testPartitioner() throws Exception {
    PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
    PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
    Cluster cluster = new Cluster(null, new ArrayList<Node>(0), asList(partitionInfo0, partitionInfo1),
            Collections.<String>emptySet(), Collections.<String>emptySet());
    MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(),
            new StringSerializer(), new StringSerializer());
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
    Future<RecordMetadata> metadata = producer.send(record);
    assertEquals("Partition should be correct", 1, metadata.get().partition());
    producer.clear();
    assertEquals("Clear should erase our history", 0, producer.history().size());
    producer.close();
}
public void onCompletion(RecordMetadata metadata, Exception exception) {
    metadata = metadata != null
            ? metadata
            : new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1);
    this.interceptors.onAcknowledgement(metadata, exception);
    if (this.userCallback != null)
        this.userCallback.onCompletion(metadata, exception);
}
}
@Test @SuppressWarnings("deprecation") public void testNullChecksum() { long timestamp = 2340234L; int keySize = 3; int valueSize = 5; RecordMetadata metadata = new RecordMetadata(new TopicPartition("foo", 0), 15L, 3L, timestamp, null, keySize, valueSize); assertEquals(DefaultRecord.computePartialChecksum(timestamp, keySize, valueSize), metadata.checksum()); }
@Override
public void onCompletion(final RecordMetadata metadata, final Exception e) {
    final String keyString = Objects.toString(key);
    final String valueString = Objects.toString(value);
    if (e != null) {
        System.err.println("Error when sending message to topic: '" + topic + "', with key: '"
                + keyString + "', and value: '" + valueString + "'");
        e.printStackTrace(System.err);
    } else {
        System.out.println(keyString + " --> (" + valueString + ") ts:" + metadata.timestamp());
    }
}
}
@Override
public void onCompletion(RecordMetadata md, Exception e) {
    if (e != null) {
        this.failedMessageCount.inc();
        this.failedMessageMeter.mark();
        LOGGER.error(e.getClass().getSimpleName() + " @ " + position + " -- " + key);
        LOGGER.error(e.getLocalizedMessage());
        if (e instanceof RecordTooLargeException) {
            LOGGER.error("Consider raising max.request.size broker-side.");
        } else if (!this.context.getConfig().ignoreProducerError) {
            this.context.terminate(e);
            return;
        }
    } else {
        this.succeededMessageCount.inc();
        this.succeededMessageMeter.mark();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("-> key:" + key + ", partition:" + md.partition() + ", offset:" + md.offset());
            LOGGER.debug("   " + this.json);
            LOGGER.debug("   " + position);
            LOGGER.debug("");
        }
    }
    cc.markCompleted();
}
}
@Test
public void shouldIncludeCommandSequenceNumberInSuccessfulQueuedCommandStatus() {
    // When:
    final QueuedCommandStatus commandStatus = commandStore.enqueueCommand(
        statementText, statement, KSQL_CONFIG, OVERRIDE_PROPERTIES);

    // Then:
    assertThat(commandStatus.getCommandSequenceNumber(), equalTo(recordMetadata.offset()));
}
@Override
public void convertAndSend(SmartCosmosEvent<Object> message) throws SmartCosmosEventException {
    MessageBuilder<SmartCosmosEvent<Object>> builder = MessageBuilder.withPayload(message)
            .setHeader(KafkaHeaders.TOPIC, message.getEventType());
    if (StringUtils.hasText(message.getEventUrn())) {
        builder.setHeader(KafkaHeaders.MESSAGE_KEY, message.getEventUrn());
    }
    ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(builder.build());
    future.addCallback(
            result -> log.info("Event successfully sent to Kafka topic {}, partition {}",
                    result.getRecordMetadata().topic(), result.getRecordMetadata().partition()),
            ex -> log.error("Failed to send event to Kafka", ex));
}
}