/** * This is called when client sends the record to KafkaProducer, before key and value gets serialized. * The method calls {@link ProducerInterceptor#onSend(ProducerRecord)} method. ProducerRecord * returned from the first interceptor's onSend() is passed to the second interceptor onSend(), and so on in the * interceptor chain. The record returned from the last interceptor is returned from this method. * * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. * If an interceptor in the middle of the chain, that normally modifies the record, throws an exception, * the next interceptor in the chain will be called with a record returned by the previous interceptor that did not * throw an exception. * * @param record the record from client * @return producer record to send to topic/partition */ public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) { ProducerRecord<K, V> interceptRecord = record; for (ProducerInterceptor<K, V> interceptor : this.interceptors) { try { interceptRecord = interceptor.onSend(interceptRecord); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors // be careful not to throw exception from here if (record != null) log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}", record.topic(), record.partition(), e); else log.warn("Error executing interceptor onSend callback", e); } } return interceptRecord; }
/** * This method is called when sending the record fails in {@link ProducerInterceptor#onSend * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} * method for each interceptor * * @param record The record from client * @param interceptTopicPartition The topic/partition for the record if an error occurred * after partition gets assigned; the topic part of interceptTopicPartition is the same as in record. * @param exception The exception thrown during processing of this record. */ public void onSendError(ProducerRecord<K, V> record, TopicPartition interceptTopicPartition, Exception exception) { for (ProducerInterceptor<K, V> interceptor : this.interceptors) { try { if (record == null && interceptTopicPartition == null) { interceptor.onAcknowledgement(null, exception); } else { if (interceptTopicPartition == null) { interceptTopicPartition = new TopicPartition(record.topic(), record.partition() == null ? RecordMetadata.UNKNOWN_PARTITION : record.partition()); } interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1), exception); } } catch (Exception e) { // do not propagate interceptor exceptions, just log log.warn("Error executing interceptor onAcknowledgement callback", e); } } }
/**
 * Computes the partition for the given record.
 * If the record carries an explicit partition, that value is returned; otherwise the
 * configured partitioner class chooses the partition from the (already serialized)
 * key and value.
 */
private int partition(ProducerRecord<K, V> record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) {
    Integer explicit = record.partition();
    if (explicit != null)
        return explicit;
    return partitioner.partition(
            record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster);
}
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
    // Count every invocation, then rebuild the record with appendStr tacked onto the value.
    ONSEND_COUNT.incrementAndGet();
    String appendedValue = record.value().concat(appendStr);
    return new ProducerRecord<>(record.topic(), record.partition(), record.key(), appendedValue);
}
@Override
public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
    onSendCount++;
    // Optional fault injection for tests that exercise interceptor failure handling.
    if (throwExceptionOnSend) {
        throw new KafkaException("Injected exception in AppendProducerInterceptor.onSend");
    }
    // Rebuild the record with appendStr concatenated onto the value.
    String appendedValue = record.value().concat(appendStr);
    return new ProducerRecord<>(record.topic(), record.partition(), record.key(), appendedValue);
}
/** * computes partition for given record. */ private int partition(ProducerRecord<K, V> record, Cluster cluster) { Integer partition = record.partition(); String topic = record.topic(); if (partition != null) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); // they have given us a partition, use it if (partition < 0 || partition >= numPartitions) throw new IllegalArgumentException("Invalid partition given with record: " + partition + " is not in the range [0..." + numPartitions + "]."); return partition; } byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key()); byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value()); return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster); }
clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs); } catch (KafkaException e) { if (metadata.isClosed())
assertEquals(2, onSendCount); assertEquals(producerRecord.topic(), interceptedRecord.topic()); assertEquals(producerRecord.partition(), interceptedRecord.partition()); assertEquals(producerRecord.key(), interceptedRecord.key()); assertEquals(interceptedRecord.value(), producerRecord.value().concat("One").concat("Two"));
/**
 * Invoked after an attempt to send a message has failed.
 *
 * @param producerRecord the failed record
 * @param exception the exception thrown
 */
default void onError(ProducerRecord<K, V> producerRecord, Exception exception) {
    // Unpack the record and delegate to the field-wise overload.
    onError(
            producerRecord.topic(),
            producerRecord.partition(),
            producerRecord.key(),
            producerRecord.value(),
            exception);
}
/**
 * Invoked after the successful send of a message (that is, after it has been acknowledged
 * by the broker).
 *
 * @param producerRecord the actual sent record
 * @param recordMetadata the result of the successful send operation
 */
default void onSuccess(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata) {
    // Unpack the record and delegate to the field-wise overload.
    onSuccess(
            producerRecord.topic(),
            producerRecord.partition(),
            producerRecord.key(),
            producerRecord.value(),
            recordMetadata);
}
/**
 * Builds a copy of {@code recordToSend} redirected to {@code topicName}, preserving the
 * original partition, timestamp, key, value and headers.
 *
 * @param topicName destination topic for the new record
 * @param recordToSend record whose payload is copied into the new record
 * @return a new record targeting {@code topicName}
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public static ProducerRecord prepareRecordToSend(String topicName, ProducerRecord recordToSend) {
    // Use the headers-aware constructor: the 5-argument ProducerRecord constructor
    // silently drops any headers attached to the source record.
    return new ProducerRecord(topicName,
            recordToSend.partition(),
            recordToSend.timestamp(),
            recordToSend.key(),
            recordToSend.value(),
            recordToSend.headers());
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public boolean process(Exchange exchange, AsyncCallback callback) {
    try {
        // Build the records first, then the aggregating callback that tracks completion.
        Iterator<ProducerRecord> records = createRecorder(exchange);
        KafkaProducerCallBack aggregateCallback = new KafkaProducerCallBack(exchange, callback);
        while (records.hasNext()) {
            // Register the pending send before dispatching it.
            aggregateCallback.increment();
            ProducerRecord next = records.next();
            if (log.isDebugEnabled()) {
                log.debug("Sending message to topic: {}, partition: {}, key: {}",
                        next.topic(), next.partition(), next.key());
            }
            kafkaProducer.send(next, aggregateCallback);
        }
        return aggregateCallback.allSent();
    } catch (Exception ex) {
        exchange.setException(ex);
    }
    // Reached only on failure: signal synchronous completion to the caller.
    callback.done(true);
    return true;
}
/**
 * Converts a {@link ProducerRecord} into a {@link SenderRecord} to send to Kafka.
 *
 * @param record the producer record to send to Kafka
 * @param correlationMetadata Additional correlation metadata that is not sent to Kafka, but is
 *        included in the response to match {@link SenderResult} to this record.
 * @return new sender record that can be sent to Kafka using {@link KafkaSender#send(org.reactivestreams.Publisher)}
 */
public static <K, V, T> SenderRecord<K, V, T> create(ProducerRecord<K, V> record, T correlationMetadata) {
    return new SenderRecord<>(
            record.topic(),
            record.partition(),
            record.timestamp(),
            record.key(),
            record.value(),
            correlationMetadata,
            record.headers());
}
/**
 * Wraps a {@link ProducerRecord} as a {@link SenderRecord} suitable for sending to Kafka.
 *
 * @param record the producer record to send to Kafka
 * @param correlationMetadata Additional correlation metadata that is not sent to Kafka, but is
 *        included in the response to match {@link SenderResult} to this record.
 * @return new sender record that can be sent to Kafka using {@link KafkaSender#send(org.reactivestreams.Publisher)}
 */
public static <K, V, T> SenderRecord<K, V, T> create(ProducerRecord<K, V> record, T correlationMetadata) {
    String topic = record.topic();
    Integer partition = record.partition();
    Long timestamp = record.timestamp();
    return new SenderRecord<>(topic, partition, timestamp,
            record.key(), record.value(), correlationMetadata, record.headers());
}
/**
 * Appends the given record to the in-memory log for its topic/partition, optionally
 * committing the transaction afterwards.
 *
 * NOTE(review): assumes record.partition() is non-null — a record without an assigned
 * partition would NPE on unboxing here; confirm callers always set it.
 *
 * @param record the record to append
 * @param commit whether to commit the transaction after appending
 * @return the offset of the appended message (committed size + uncommitted size - 1,
 *         computed after any commit)
 * @throws LeaderNotAvailableException if no log exists for the record's partition
 */
public long appendMessage(ProducerRecord<Integer, String> record, boolean commit) {
    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
    List<Message> committed = log(tp);
    if (committed == null)
        throw new LeaderNotAvailableException("Partition not available: " + tp);
    Message message = new Message(record.key(), record.value(), record.timestamp());
    List<Message> pending = uncommittedMessages.get(tp);
    pending.add(message);
    // Commit (if requested) BEFORE computing sizes: committing may move pending
    // messages into the committed log, and the returned offset reflects that.
    if (commit)
        commitTransaction();
    return committed.size() + pending.size() - 1;
}