private void collect(final ProducerRecord record, final boolean isError) {
    collect(isError, record.topic().toLowerCase());
}
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
    TopicPartition tp = new TopicPartition(record.topic(), 0);
    // Dummy metadata: placeholder offsets, timestamps, and serialized sizes.
    RecordMetadata rm = new RecordMetadata(tp, -1L, -1L, 1L, 2L, 3, 4);
    // Complete the callback synchronously and return an already-completed future.
    if (callback != null)
        callback.onCompletion(rm, null);
    return Futures.immediateFuture(rm);
}
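// A minimal usage sketch (not from the original source): the stub above behaves like
// Kafka's own MockProducer with autoComplete enabled, where the callback fires
// synchronously inside send(). Class and variable names here are illustrative.
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SynchronousSendExample {
    public static void main(String[] args) {
        // autoComplete=true acknowledges every send() immediately, like the stub above
        MockProducer<String, String> producer =
                new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        AtomicReference<RecordMetadata> seen = new AtomicReference<>();
        producer.send(new ProducerRecord<>("test-topic", "k", "v"),
                (metadata, exception) -> seen.set(metadata));
        System.out.println("callback saw: " + seen.get()); // completed before send() returned
        producer.close();
    }
}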
/**
 * This is called when the client sends the record to KafkaProducer, before the key and
 * value get serialized. The method calls {@link ProducerInterceptor#onSend(ProducerRecord)}
 * for each interceptor: the ProducerRecord returned from the first interceptor's onSend()
 * is passed to the second interceptor's onSend(), and so on through the interceptor chain.
 * The record returned from the last interceptor is returned from this method.
 *
 * This method does not throw exceptions. Exceptions thrown by any of the interceptor
 * methods are caught and ignored. If an interceptor in the middle of the chain (one that
 * normally modifies the record) throws an exception, the next interceptor in the chain is
 * called with the record returned by the last interceptor that did not throw an exception.
 *
 * @param record the record from the client
 * @return the producer record to send to the topic/partition
 */
public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
    ProducerRecord<K, V> interceptRecord = record;
    for (ProducerInterceptor<K, V> interceptor : this.interceptors) {
        try {
            interceptRecord = interceptor.onSend(interceptRecord);
        } catch (Exception e) {
            // do not propagate interceptor exceptions; log and continue calling the
            // remaining interceptors -- be careful not to throw from here
            if (record != null)
                log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}",
                        record.topic(), record.partition(), e);
            else
                log.warn("Error executing interceptor onSend callback", e);
        }
    }
    return interceptRecord;
}
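// A minimal sketch (assumed, not from the original source) of a ProducerInterceptor that
// participates in the chain driven by onSend() above: it returns a modified record, which
// is then handed to the next interceptor. HeaderTaggingInterceptor is a hypothetical name;
// it would be wired in via the producer's interceptor.classes config.
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class HeaderTaggingInterceptor implements ProducerInterceptor<String, String> {
    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Mutating headers in place and returning the same record keeps the chain intact.
        record.headers().add("traced", new byte[] {1});
        return record;
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // Invoked on broker ack or send failure; should return quickly.
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}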
/**
 * Computes the partition for the given record.
 * If the record has a partition, returns that value; otherwise calls the configured
 * partitioner class to compute the partition.
 */
private int partition(ProducerRecord<K, V> record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) {
    Integer partition = record.partition();
    return partition != null ?
            partition :
            partitioner.partition(
                    record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster);
}
/**
 * This method is called when sending the record fails in
 * {@link ProducerInterceptor#onSend(ProducerRecord)}. It calls
 * {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} for each interceptor.
 *
 * @param record The record from the client
 * @param interceptTopicPartition The topic/partition for the record if an error occurred
 *        after the partition was assigned; the topic part of interceptTopicPartition is
 *        the same as in the record.
 * @param exception The exception thrown during processing of this record.
 */
public void onSendError(ProducerRecord<K, V> record, TopicPartition interceptTopicPartition, Exception exception) {
    for (ProducerInterceptor<K, V> interceptor : this.interceptors) {
        try {
            if (record == null && interceptTopicPartition == null) {
                interceptor.onAcknowledgement(null, exception);
            } else {
                if (interceptTopicPartition == null) {
                    interceptTopicPartition = new TopicPartition(record.topic(),
                            record.partition() == null ? RecordMetadata.UNKNOWN_PARTITION : record.partition());
                }
                interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1,
                        RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1), exception);
            }
        } catch (Exception e) {
            // do not propagate interceptor exceptions, just log
            log.warn("Error executing interceptor onAcknowledgement callback", e);
        }
    }
}
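// A hypothetical counterpart sketch observing the failures that onSendError() above fans
// out. The synthetic RecordMetadata it receives carries unknown offset/timestamp, so only
// the exception is meaningful here. ErrorCountingInterceptor is an illustrative name, not
// part of the original source.
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class ErrorCountingInterceptor implements ProducerInterceptor<String, String> {
    private static final AtomicLong ERRORS = new AtomicLong();

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // exception != null both for broker-side failures and for pre-send failures
        // routed through onSendError()
        if (exception != null)
            ERRORS.incrementAndGet();
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}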
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> producerRecord, Callback callback) {
    com.twitter.util.Future<DLSN> dlsnFuture;
    if (null == producerRecord.key()) {
        dlsnFuture = getUnpartitionedMultiWriter(producerRecord.topic()).write(producerRecord.value());
    } else {
        // TODO: be able to publish to a specific partition
        dlsnFuture = getPartitionedMultiWriter(producerRecord.topic()).write(producerRecord.key(),
                producerRecord.value());
    }
    return new DLFutureRecordMetadata(producerRecord.topic(), dlsnFuture, callback);
}
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
    ONSEND_COUNT.incrementAndGet();
    return new ProducerRecord<>(
            record.topic(), record.partition(), record.key(), record.value().concat(appendStr));
}
if (!this.cluster.partitionsForTopic(record.topic()).isEmpty())
    partition = partition(record, this.cluster);
TopicPartition topicPartition = new TopicPartition(record.topic(), partition);
ProduceRequestResult result = new ProduceRequestResult(topicPartition);
FutureRecordMetadata future = new FutureRecordMetadata(result, 0, RecordBatch.NO_TIMESTAMP,
@Override
public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
    onSendCount++;
    if (throwExceptionOnSend)
        throw new KafkaException("Injected exception in AppendProducerInterceptor.onSend");
    return new ProducerRecord<>(
            record.topic(), record.partition(), record.key(), record.value().concat(appendStr));
}
/**
 * Computes the partition for the given record.
 */
private int partition(ProducerRecord<K, V> record, Cluster cluster) {
    Integer partition = record.partition();
    String topic = record.topic();
    if (partition != null) {
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        // they have given us a partition, use it
        if (partition < 0 || partition >= numPartitions)
            throw new IllegalArgumentException("Invalid partition given with record: " + partition
                    + " is not in the range [0..." + (numPartitions - 1) + "].");
        return partition;
    }
    byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key());
    byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value());
    return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster);
}
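// A minimal sketch of the pluggable side of the partition() logic above: a custom
// Partitioner that this.partitioner delegates to for records without an explicit
// partition. KeyHashPartitioner is an illustrative name; the murmur2 hashing mirrors
// what Kafka's default partitioner does for keyed records.
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.utils.Utils;

public class KeyHashPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null)
            return 0; // assumption: send keyless records to partition 0
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}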
@Test
public void testSimple() {
    MockProducer<String, String> producer = new MockProducer<>(Cluster.empty(), false, null, null, null);
    KafkaBolt<String, String> bolt = makeBolt(producer);

    OutputCollector collector = mock(OutputCollector.class);
    TopologyContext context = mock(TopologyContext.class);
    Map<String, Object> conf = new HashMap<>();
    bolt.prepare(conf, context, collector);

    String key = "KEY";
    String value = "VALUE";
    Tuple testTuple = createTestTuple(key, value);
    bolt.execute(testTuple);

    assertThat(producer.history().size(), is(1));
    ProducerRecord<String, String> arg = producer.history().get(0);

    LOG.info("GOT {} ->", arg);
    LOG.info("{}, {}, {}", arg.topic(), arg.key(), arg.value());
    assertThat(arg.topic(), is("MY_TOPIC"));
    assertThat(arg.key(), is(key));
    assertThat(arg.value(), is(value));

    // Complete the send
    producer.completeNext();
    verify(collector).ack(testTuple);
}
@Test
public void testSimpleWithError() {
    MockProducer<String, String> producer = new MockProducer<>(Cluster.empty(), false, null, null, null);
    KafkaBolt<String, String> bolt = makeBolt(producer);

    OutputCollector collector = mock(OutputCollector.class);
    TopologyContext context = mock(TopologyContext.class);
    Map<String, Object> conf = new HashMap<>();
    bolt.prepare(conf, context, collector);

    String key = "KEY";
    String value = "VALUE";
    Tuple testTuple = createTestTuple(key, value);
    bolt.execute(testTuple);

    assertThat(producer.history().size(), is(1));
    ProducerRecord<String, String> arg = producer.history().get(0);

    LOG.info("GOT {} ->", arg);
    LOG.info("{}, {}, {}", arg.topic(), arg.key(), arg.value());
    assertThat(arg.topic(), is("MY_TOPIC"));
    assertThat(arg.key(), is(key));
    assertThat(arg.value(), is(value));

    // Force a send error
    KafkaException ex = new KafkaException();
    producer.errorNext(ex);
    verify(collector).reportError(ex);
    verify(collector).fail(testTuple);
}
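// A condensed sketch (illustrative, not from the original tests) of the MockProducer
// completion controls the two tests above rely on when constructed with autoComplete=false:
// sends stay pending until completeNext() or errorNext() resolves them in FIFO order.
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.StringSerializer;

public class PendingSendExample {
    public static void main(String[] args) {
        MockProducer<String, String> producer =
                new MockProducer<>(false, new StringSerializer(), new StringSerializer());
        producer.send(new ProducerRecord<>("MY_TOPIC", "a", "1"),
                (metadata, exception) -> System.out.println("first: " + exception));
        producer.send(new ProducerRecord<>("MY_TOPIC", "b", "2"),
                (metadata, exception) -> System.out.println("second: " + exception));
        producer.completeNext();                  // acks the oldest pending send (exception == null)
        producer.errorNext(new KafkaException()); // fails the next one with the given exception
    }
}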
@Test
public void testAppendWithKeyLookup() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppenderWithKeyLookup");
    final LogEvent logEvent = createLogEvent();
    final Date date = new Date();
    final SimpleDateFormat format = new SimpleDateFormat("dd-MM-yyyy");
    appender.append(logEvent);
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    final byte[] keyValue = format.format(date).getBytes(StandardCharsets.UTF_8);
    assertArrayEquals(keyValue, item.key());
    assertEquals(LOG_MESSAGE, new String(item.value(), StandardCharsets.UTF_8));
}
@Test
public void testAppendWithKey() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppenderWithKey");
    final LogEvent logEvent = createLogEvent();
    appender.append(logEvent);
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    final byte[] keyValue = "key".getBytes(StandardCharsets.UTF_8);
    assertArrayEquals(keyValue, item.key());
    assertEquals(LOG_MESSAGE, new String(item.value(), StandardCharsets.UTF_8));
}
@Test
public void testAsyncAppend() throws Exception {
    final Appender appender = ctx.getRequiredAppender("AsyncKafkaAppender");
    appender.append(createLogEvent());
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    assertNull(item.key());
    assertEquals(LOG_MESSAGE, new String(item.value(), StandardCharsets.UTF_8));
}
@Test
public void testAppendWithLayout() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppenderWithLayout");
    appender.append(createLogEvent());
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    assertNull(item.key());
    assertEquals("[" + LOG_MESSAGE + "]", new String(item.value(), StandardCharsets.UTF_8));
}
@Test
public void testAppendWithSerializedLayout() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppenderWithSerializedLayout");
    final LogEvent logEvent = createLogEvent();
    appender.append(logEvent);
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    assertNull(item.key());
    assertEquals(LOG_MESSAGE, deserializeLogEvent(item.value()).getMessage().getFormattedMessage());
}