/** Serializes the event and publishes it (with a null key) to the configured topic/partition. */
@Override
public void collect(Event evt) {
    byte[] payload = serializer.serialize(evt);
    producer.send(new ProducerRecord<>(topic, partition, null, payload));
}
/**
 * Computes the partition for the given record.
 * <p>
 * A partition set explicitly on the record wins; otherwise the configured
 * partitioner class is asked to pick one from the cluster metadata.
 */
private int partition(ProducerRecord<K, V> record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) {
    Integer explicit = record.partition();
    if (explicit != null) {
        return explicit;
    }
    return partitioner.partition(
        record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster);
}
@Override public Future<RecordMetadata> send(ProducerRecord<K, V> producerRecord, Callback callback) { com.twitter.util.Future<DLSN> dlsnFuture; if (null == producerRecord.key()) { dlsnFuture = getUnpartitionedMultiWriter(producerRecord.topic()).write(producerRecord.value()); } else { // TODO: be able to publish to a specific partition dlsnFuture = getPartitionedMultiWriter(producerRecord.topic()).write(producerRecord.key(), producerRecord.value()); } return new DLFutureRecordMetadata(producerRecord.topic(), dlsnFuture, callback); }
/** * This is called when client sends the record to KafkaProducer, before key and value gets serialized. * The method calls {@link ProducerInterceptor#onSend(ProducerRecord)} method. ProducerRecord * returned from the first interceptor's onSend() is passed to the second interceptor onSend(), and so on in the * interceptor chain. The record returned from the last interceptor is returned from this method. * * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. * If an interceptor in the middle of the chain, that normally modifies the record, throws an exception, * the next interceptor in the chain will be called with a record returned by the previous interceptor that did not * throw an exception. * * @param record the record from client * @return producer record to send to topic/partition */ public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) { ProducerRecord<K, V> interceptRecord = record; for (ProducerInterceptor<K, V> interceptor : this.interceptors) { try { interceptRecord = interceptor.onSend(interceptRecord); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors // be careful not to throw exception from here if (record != null) log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}", record.topic(), record.partition(), e); else log.warn("Error executing interceptor onSend callback", e); } } return interceptRecord; }
/** Verifies the equals/hashCode contract of ProducerRecord across matching and mismatching fields. */
@Test
public void testEqualsAndHashCode() {
    ProducerRecord<String, Integer> base = new ProducerRecord<>("test", 1, "key", 1);

    // Reflexive: an instance equals itself with a stable hash.
    assertEquals(base, base);
    assertEquals(base.hashCode(), base.hashCode());

    // Field-for-field identical record: equal objects, equal hashes.
    ProducerRecord<String, Integer> same = new ProducerRecord<>("test", 1, "key", 1);
    assertEquals(base, same);
    assertEquals(base.hashCode(), same.hashCode());

    // Any single differing field breaks equality.
    assertFalse(base.equals(new ProducerRecord<String, Integer>("test-1", 1, "key", 1)));
    assertFalse(base.equals(new ProducerRecord<String, Integer>("test", 2, "key", 1)));
    assertFalse(base.equals(new ProducerRecord<String, Integer>("test", 1, "key-1", 1)));
    assertFalse(base.equals(new ProducerRecord<String, Integer>("test", 1, "key", 2)));

    // All-null optional fields must still satisfy the contract.
    ProducerRecord<String, Integer> allNulls = new ProducerRecord<>("topic", null, null, null, null, null);
    assertEquals(allNulls, allNulls);
    assertEquals(allNulls.hashCode(), allNulls.hashCode());
}
public void start() throws InterruptedException { RandomGenerator random = RandomManager.getRandom(); Properties props = ConfigUtils.keyValueToProperties( "bootstrap.servers", "localhost:" + kafkaPort, "key.serializer", "org.apache.kafka.common.serialization.StringSerializer", "value.serializer", "org.apache.kafka.common.serialization.StringSerializer", "compression.type", "gzip", "linger.ms", 0, "batch.size", 0, "acks", 1, "max.request.size", 1 << 26 // TODO ); try (Producer<String,String> producer = new KafkaProducer<>(props)) { for (int i = 0; i < howMany; i++) { Pair<String,String> datum = datumGenerator.generate(i, random); ProducerRecord<String,String> record = new ProducerRecord<>(topic, datum.getFirst(), datum.getSecond()); producer.send(record); log.debug("Sent datum {} = {}", record.key(), record.value()); if (intervalMsec > 0) { Thread.sleep(intervalMsec); } } } }
ProducerRecord<String, String> record = new ProducerRecord<>(topic, key, value); record.headers().add(new RecordHeader("test", "header2".getBytes())); producer.send(record, null); record.headers().add(new RecordHeader("test", "test".getBytes())); fail("Expected IllegalStateException to be raised"); } catch (IllegalStateException ise) { assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes()); verify(valueSerializer).serialize(topic, record.headers(), value); verify(keySerializer).serialize(topic, record.headers(), key);
new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test")); assertEquals(1, onErrorAckCount); ProducerRecord<Integer, String> record2 = new ProducerRecord<>("test2", null, 1, "value"); interceptors.onSendError(record2, null, new KafkaException("Test")); assertEquals(3, onErrorAckCount); int reassignedPartition = producerRecord.partition() + 1; interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test")); assertEquals(4, onErrorAckCount);
/**
 * Converts the row map to a ProducerRecord and hands it to the async send path
 * with a callback that updates the success/failure counters and meters.
 */
@Override
public void sendAsync(RowMap r, AbstractAsyncProducer.CallbackCompleter cc) throws Exception {
    ProducerRecord<String, String> record = makeProducerRecord(r);

    // Only hold a reference to the value when debug logging will actually use it;
    // releasing it otherwise can ease memory pressure somewhat.
    String loggedValue = KafkaCallback.LOGGER.isDebugEnabled() ? record.value() : null;

    KafkaCallback callback = new KafkaCallback(cc, r.getNextPosition(), record.key(), loggedValue,
        this.succeededMessageCount, this.failedMessageCount,
        this.succeededMessageMeter, this.failedMessageMeter, this.context);
    sendAsync(record, callback);
}
span = tracer.nextSpan(kafkaTracing.extractAndClearHeaders(record.headers())); } else { span.kind(Span.Kind.PRODUCER).name("send"); if (remoteServiceName != null) span.remoteServiceName(remoteServiceName); if (record.key() instanceof String && !"".equals(record.key())) { span.tag(KafkaTags.KAFKA_KEY_TAG, record.key().toString()); span.tag(KafkaTags.KAFKA_TOPIC_TAG, record.topic()); span.start(); injector.inject(span.context(), record.headers());
/**
 * Error-injecting send: asks the error manager whether this record should fail.
 * On failure, completes the callback immediately with a fresh Exception and
 * returns {@code nullFuture}; otherwise delegates to the real producer.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, final Callback callback) {
    // Query the error manager exactly once. The original called nextError() twice
    // (once into an unused local, once in the condition), which both discards the
    // first answer and can advance the manager's internal error sequence an extra step.
    boolean error = errorManager.nextError(record.value());
    if (error) {
        final Exception e = new Exception();
        callback.onCompletion(null, e);
        return nullFuture;
    }
    return super.send(record, callback);
}
/**
 * Records a metric for the given record, keyed by its lower-cased topic name.
 *
 * @param record  the record whose topic names the metric
 * @param isError whether the record represents a failed send
 */
private void collect(final ProducerRecord record, final boolean isError) {
    // Locale.ROOT keeps the topic key stable regardless of the JVM's default
    // locale (e.g. the Turkish dotless-I would otherwise change 'I' -> 'ı').
    collect(isError, record.topic().toLowerCase(java.util.Locale.ROOT));
}
@Test public void testSimple() { MockProducer<String, String> producer = new MockProducer<>(Cluster.empty(), false, null, null, null); KafkaBolt<String, String> bolt = makeBolt(producer); OutputCollector collector = mock(OutputCollector.class); TopologyContext context = mock(TopologyContext.class); Map<String, Object> conf = new HashMap<>(); bolt.prepare(conf, context, collector); String key = "KEY"; String value = "VALUE"; Tuple testTuple = createTestTuple(key, value); bolt.execute(testTuple); assertThat(producer.history().size(), is(1)); ProducerRecord<String, String> arg = producer.history().get(0); LOG.info("GOT {} ->", arg); LOG.info("{}, {}, {}", arg.topic(), arg.key(), arg.value()); assertThat(arg.topic(), is("MY_TOPIC")); assertThat(arg.key(), is(key)); assertThat(arg.value(), is(value)); // Complete the send producer.completeNext(); verify(collector).ack(testTuple); }
/**
 * Verifies that a configured key selector is applied: the produced record's key
 * must be the UTF-8 bytes of the log name and its value the log object itself.
 */
@Test
public void testWithKeySelector() {
    // Pin UTF-8 explicitly; the original bare getBytes()/new String(byte[]) used the
    // platform default charset, which is environment-dependent pre-Java 18.
    final KafkaStructuredLoggingServiceExposed service = new KafkaStructuredLoggingServiceExposed(
        producer,
        (res, log) -> log.name.getBytes(java.nio.charset.StandardCharsets.UTF_8),
        false);
    final SimpleStructuredLog log = new SimpleStructuredLog("kawamuray");
    service.writeLog(null, log);

    verify(producer, times(1)).send(captor.capture(), any(Callback.class));
    final ProducerRecord<byte[], SimpleStructuredLog> record = captor.getValue();
    assertThat(record.key()).isNotNull();
    assertThat(new String(record.key(), java.nio.charset.StandardCharsets.UTF_8)).isEqualTo(log.name);
    assertThat(record.value()).isEqualTo(log);
}
/** * This method is called when sending the record fails in {@link ProducerInterceptor#onSend * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} * method for each interceptor * * @param record The record from client * @param interceptTopicPartition The topic/partition for the record if an error occurred * after partition gets assigned; the topic part of interceptTopicPartition is the same as in record. * @param exception The exception thrown during processing of this record. */ public void onSendError(ProducerRecord<K, V> record, TopicPartition interceptTopicPartition, Exception exception) { for (ProducerInterceptor<K, V> interceptor : this.interceptors) { try { if (record == null && interceptTopicPartition == null) { interceptor.onAcknowledgement(null, exception); } else { if (interceptTopicPartition == null) { interceptTopicPartition = new TopicPartition(record.topic(), record.partition() == null ? RecordMetadata.UNKNOWN_PARTITION : record.partition()); } interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1), exception); } } catch (Exception e) { // do not propagate interceptor exceptions, just log log.warn("Error executing interceptor onAcknowledgement callback", e); } } }
/**
 * Stub send: fabricates fixed RecordMetadata for partition 0 of the record's topic,
 * fires the callback immediately (if any), and returns an already-completed future.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
    RecordMetadata metadata =
        new RecordMetadata(new TopicPartition(record.topic(), 0), -1L, -1L, 1L, 2L, 3, 4);
    if (callback != null) {
        callback.onCompletion(metadata, null);
    }
    return Futures.immediateFuture(metadata);
}
/**
 * Pushes every byte-array message to the Kafka topic. Send failures are logged
 * per message and never propagated to the caller.
 *
 * @param messages list of byte array messages to push to Kafka
 */
public void pushMessages(List<byte[]> messages) {
    for (byte[] payload : messages) {
        producer.send(new ProducerRecord<>(topic, payload), (metadata, exception) -> {
            if (exception != null) {
                log.error("Failed to send message to topic {} due to exception: ", topic, exception);
            }
        });
    }
}
/** * computes partition for given record. */ private int partition(ProducerRecord<K, V> record, Cluster cluster) { Integer partition = record.partition(); String topic = record.topic(); if (partition != null) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); // they have given us a partition, use it if (partition < 0 || partition >= numPartitions) throw new IllegalArgumentException("Invalid partition given with record: " + partition + " is not in the range [0..." + numPartitions + "]."); return partition; } byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key()); byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value()); return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster); }
@Test public void testSimpleWithError() { MockProducer<String, String> producer = new MockProducer<>(Cluster.empty(), false, null, null, null); KafkaBolt<String, String> bolt = makeBolt(producer); OutputCollector collector = mock(OutputCollector.class); TopologyContext context = mock(TopologyContext.class); Map<String, Object> conf = new HashMap<>(); bolt.prepare(conf, context, collector); String key = "KEY"; String value = "VALUE"; Tuple testTuple = createTestTuple(key, value); bolt.execute(testTuple); assertThat(producer.history().size(), is(1)); ProducerRecord<String, String> arg = producer.history().get(0); LOG.info("GOT {} ->", arg); LOG.info("{}, {}, {}", arg.topic(), arg.key(), arg.value()); assertThat(arg.topic(), is("MY_TOPIC")); assertThat(arg.key(), is(key)); assertThat(arg.value(), is(value)); // Force a send error KafkaException ex = new KafkaException(); producer.errorNext(ex); verify(collector).reportError(ex); verify(collector).fail(testTuple); }