@Test public void testOnAcknowledgementChain() { List<ProducerInterceptor<Integer, String>> interceptorList = new ArrayList<>(); // we are testing two different interceptors by configuring the same interceptor differently, which is not // how it would be done in KafkaProducer, but ok for testing interceptor callbacks AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList); // verify onAck is called on all interceptors RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, Long.valueOf(0L), 0, 0); interceptors.onAcknowledgement(meta, null); assertEquals(2, onAckCount); // verify that onAcknowledgement exceptions do not propagate interceptor1.injectOnAcknowledgementError(true); interceptors.onAcknowledgement(meta, null); assertEquals(4, onAckCount); interceptor2.injectOnAcknowledgementError(true); interceptors.onAcknowledgement(meta, null); assertEquals(6, onAckCount); interceptors.close(); }
/**
 * Asynchronously sends a record, first giving every configured ProducerInterceptor a
 * chance to observe/replace it via {@code onSend()} before handing off to doSend().
 *
 * NOTE(review): the method's closing brace is outside this view.
 *
 * @param record   the record to send (may be replaced by the interceptor chain)
 * @param callback callback invoked when the send completes or fails
 * @return a future for the record's metadata
 */
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    // Intercept the record, which can be potentially modified; presumably onSend()
    // swallows interceptor exceptions rather than throwing — TODO confirm in ProducerInterceptors.
    ProducerRecord<K, V> interceptedRecord = this.interceptors.onSend(record);
    return doSend(interceptedRecord, callback);
// Fragment of a test for ProducerInterceptors.onSendError(); the @Test signature, the
// counter fields, producerRecord/record2 construction, and the closing brace are all
// outside this view.
AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One");
interceptorList.add(interceptor1);
ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);

// An explicit TopicPartition, then a null one. Both bump the "topic-partition set"
// counter, so a null partition is presumably reconstructed from the record (which
// must carry a partition — TODO confirm against its construction above).
interceptors.onSendError(producerRecord,
        new TopicPartition(producerRecord.topic(), producerRecord.partition()),
        new KafkaException("Test"));
interceptors.onSendError(producerRecord, null, new KafkaException("Test"));
assertEquals(2, onErrorAckCount);
assertEquals(2, onErrorAckWithTopicPartitionSetCount);

// record2 with a null TopicPartition: only the topic can be attributed here
// (topic-set count advances, topic-partition-set count does not).
interceptors.onSendError(record2, null, new KafkaException("Test"));
assertEquals(3, onErrorAckCount);
assertEquals(3, onErrorAckWithTopicSetCount);

// An explicitly reassigned partition, then a fully-null notification; the null record
// must still be counted as an error ack but sets neither topic nor partition.
interceptors.onSendError(record2,
        new TopicPartition(record2.topic(), reassignedPartition),
        new KafkaException("Test"));
interceptors.onSendError(null, null, new KafkaException("Test"));
assertEquals(5, onErrorAckCount);
assertEquals(4, onErrorAckWithTopicSetCount);
assertEquals(3, onErrorAckWithTopicPartitionSetCount);

interceptors.close();
// Fragment of a test exercising the ProducerInterceptors.onSend() chain; the @Test
// signature, interceptor construction, and the closing brace are outside this view.
interceptorList.add(interceptor1);
interceptorList.add(interceptor2);
ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);

// Both interceptors run (onSendCount advances by 2 per call); the topic passes through.
ProducerRecord<Integer, String> interceptedRecord = interceptors.onSend(producerRecord);
assertEquals(2, onSendCount);
assertEquals(producerRecord.topic(), interceptedRecord.topic());

// A second pass yields an equal record — the interceptors are deterministic here.
ProducerRecord<Integer, String> anotherRecord = interceptors.onSend(producerRecord);
assertEquals(4, onSendCount);
assertEquals(interceptedRecord, anotherRecord);

// Only "Two" is appended to the value now, so interceptor1 was presumably switched
// to pass-through by code elided from this view — TODO confirm.
ProducerRecord<Integer, String> partInterceptRecord = interceptors.onSend(producerRecord);
assertEquals(6, onSendCount);
assertEquals(partInterceptRecord.value(), producerRecord.value().concat("Two"));

// With no interceptor modifying the record, onSend returns a record equal to the input.
ProducerRecord<Integer, String> noInterceptRecord = interceptors.onSend(producerRecord);
assertEquals(producerRecord, noInterceptRecord);

interceptors.close();
@Test public void testInterceptorPartitionSetOnTooLargeRecord() { Map<String, Object> configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1"); String topic = "topic"; ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value"); Metadata metadata = new Metadata(0, 90000, true); MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, singletonMap(topic, 1)); metadata.update(initialUpdateResponse, Time.SYSTEM.milliseconds()); @SuppressWarnings("unchecked") // it is safe to suppress, since this is a mock class ProducerInterceptors<String, String> interceptors = mock(ProducerInterceptors.class); KafkaProducer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer(), metadata, null, interceptors, Time.SYSTEM); when(interceptors.onSend(any())).then(invocation -> invocation.getArgument(0)); producer.send(record); verify(interceptors).onSend(record); verify(interceptors).onSendError(eq(record), notNull(), notNull()); producer.close(Duration.ofMillis(0)); }
/**
 * Completion callback that notifies the interceptor chain before delegating to the
 * user-supplied callback (if any). A null metadata (error case) is replaced by a
 * placeholder carrying the record's topic-partition and -1 sentinels.
 */
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (metadata == null) {
        // Long.valueOf keeps the boxed-checksum RecordMetadata constructor overload.
        metadata = new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1);
    }
    // Interceptors are always notified, even when there is no user callback.
    this.interceptors.onAcknowledgement(metadata, exception);
    if (this.userCallback != null) {
        this.userCallback.onCompletion(metadata, exception);
    }
}
}
// Fragment of KafkaProducer.doSend()'s exception handling; the enclosing try block and
// the declarations of record/tp/callback are outside this view.
// ApiException path: surfaced through the callback and a failed future, not thrown.
callback.onCompletion(null, e);
this.errors.record();
this.interceptors.onSendError(record, tp, e);
return new FutureFailure(e);
} catch (InterruptedException e) {
    this.errors.record();
    this.interceptors.onSendError(record, tp, e);
    // NOTE(review): InterruptException is presumed to restore the thread's interrupt
    // flag in its constructor — confirm, since the flag is not re-set here.
    throw new InterruptException(e);
} catch (BufferExhaustedException e) {
    this.errors.record();
    // Dedicated sensor so buffer exhaustion can be monitored separately from other errors.
    this.metrics.sensor("buffer-exhausted-records").record();
    this.interceptors.onSendError(record, tp, e);
    throw e;
} catch (KafkaException e) {
    this.errors.record();
    this.interceptors.onSendError(record, tp, e);
    throw e;
} catch (Exception e) {
    // Interceptors are notified even for unexpected exceptions; note this path does
    // not record the error metric, unlike the branches above.
    this.interceptors.onSendError(record, tp, e);
    throw e;
// Fragment of the KafkaProducer constructor; the `if` condition guarding this branch
// (presumably a null check on an injected interceptors instance — TODO confirm) is
// outside this view.
this.interceptors = interceptors;
else
    // No instance was injected: build the chain from the configured interceptor list.
    this.interceptors = new ProducerInterceptors<>(interceptorList);
// Register serializers, interceptors and metric reporters as candidate listeners for
// cluster-resource (metadata) updates — inferred from the arguments passed; confirm in
// configureClusterResourceListeners.
ClusterResourceListeners clusterResourceListeners =
        configureClusterResourceListeners(keySerializer, valueSerializer, interceptorList, reporters);