// Asynchronously sends a record, returning a Future for the broker acknowledgement
// and invoking {@code callback} when the send completes. The record is first run
// through the configured interceptor chain, which may replace or modify it; the
// (possibly different) intercepted record is what actually gets sent via doSend.
// NOTE(review): the closing brace of this method lies outside the visible chunk.
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    // Intercept first so interceptors observe/mutate the record before any
    // serialization, partitioning, or size checks happen inside doSend.
    ProducerRecord<K, V> interceptedRecord = this.interceptors.onSend(record);
    return doSend(interceptedRecord, callback);
@Test public void testInterceptorPartitionSetOnTooLargeRecord() { Map<String, Object> configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1"); String topic = "topic"; ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value"); Metadata metadata = new Metadata(0, 90000, true); MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, singletonMap(topic, 1)); metadata.update(initialUpdateResponse, Time.SYSTEM.milliseconds()); @SuppressWarnings("unchecked") // it is safe to suppress, since this is a mock class ProducerInterceptors<String, String> interceptors = mock(ProducerInterceptors.class); KafkaProducer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer(), metadata, null, interceptors, Time.SYSTEM); when(interceptors.onSend(any())).then(invocation -> invocation.getArgument(0)); producer.send(record); verify(interceptors).onSend(record); verify(interceptors).onSendError(eq(record), notNull(), notNull()); producer.close(Duration.ofMillis(0)); }
// NOTE(review): this span is the interior of a test method; the interceptor
// chain, onSendCount, and producerRecord are all set up outside the visible
// chunk — comments below about chain composition are inferred and should be
// confirmed against that setup.
// First pass: onSendCount lands at 2, so two onSend hooks ran; the topic is
// preserved by the chain.
ProducerRecord<Integer, String> interceptedRecord = interceptors.onSend(producerRecord);
assertEquals(2, onSendCount);
assertEquals(producerRecord.topic(), interceptedRecord.topic());
// Second pass: count rises by 2 again and the result equals the first pass's,
// i.e. interception is deterministic for the same input record.
ProducerRecord<Integer, String> anotherRecord = interceptors.onSend(producerRecord);
assertEquals(4, onSendCount);
assertEquals(interceptedRecord, anotherRecord);
// Third pass: the returned value is the original value with "Two" appended —
// presumably only the "Two"-appending interceptor modified the record this
// time (partial interception) — TODO confirm against the unseen setup.
ProducerRecord<Integer, String> partInterceptRecord = interceptors.onSend(producerRecord);
assertEquals(6, onSendCount);
assertEquals(partInterceptRecord.value(), producerRecord.value().concat("Two"));
// Final pass: the record comes back equal to the original, suggesting no
// interceptor modifies it at this point in the test.
ProducerRecord<Integer, String> noInterceptRecord = interceptors.onSend(producerRecord);
assertEquals(producerRecord, noInterceptRecord);