// Releases this wrapper's resources by delegating to the wrapped producer.
// NOTE(review): blocking semantics depend on the delegate's close() — confirm against its type.
@Override public void close() {
    producer.close();
} }
// Delegates a bounded-wait close to the wrapped producer.
// NOTE(review): KafkaProducer#close(long, TimeUnit) is deprecated since Kafka 2.0 in
// favor of close(Duration) — consider migrating if the client version allows.
@Override public void close(long timeout, TimeUnit unit) {
    kafkaProducer.close(timeout, unit);
}
// Delegates a bounded-wait close to the wrapped producer.
// NOTE(review): KafkaProducer#close(long, TimeUnit) is deprecated since Kafka 2.0 in
// favor of close(Duration) — consider migrating if the client version allows.
@Override public void close(long timeout, TimeUnit unit) {
    kafkaProducer.close(timeout, unit);
}
// Delegates close() to the wrapped Kafka producer, releasing its resources.
@Override public void close() {
    kafkaProducer.close();
}
// Delegates close() to the wrapped Kafka producer, releasing its resources.
@Override public void close() {
    kafkaProducer.close();
}
/**
 * This method waits up to <code>timeout</code> for the producer to complete the sending of all
 * incomplete requests.
 * <p>
 * If the producer is unable to complete all requests before the timeout expires, this method
 * will fail any unsent and unacknowledged records immediately.
 * <p>
 * If invoked from within a {@link Callback} this method will not block and will be equivalent
 * to <code>close(Duration.ofMillis(0))</code>. This is done since no further sending will
 * happen while blocking the I/O thread of the producer.
 *
 * @param timeout The maximum time to wait for producer to complete any pending requests. The
 *                value should be non-negative. Specifying a timeout of zero means do not wait
 *                for pending send requests to complete.
 * @throws InterruptException If the thread is interrupted while blocked
 * @throws IllegalArgumentException If the <code>timeout</code> is negative.
 */
@Override
public void close(Duration timeout) {
    // Second argument (swallowException = false): close failures are surfaced to the caller.
    close(timeout, false);
}
// Lifecycle stop hook: shuts down the wrapped Kafka producer.
// Declared throws Exception to satisfy the overridden lifecycle interface.
@Override public void stop() throws Exception {
    kafkaProducer.close();
}
/**
 * Close this producer. This method blocks until all previously sent requests complete.
 * This method is equivalent to <code>close(Long.MAX_VALUE, TimeUnit.MILLISECONDS)</code>.
 * <p>
 * <strong>If close() is called from {@link Callback}, a warning message will be logged and
 * close(0, TimeUnit.MILLISECONDS) will be called instead. We do this because the sender
 * thread would otherwise try to join itself and block forever.</strong>
 *
 * @throws InterruptException If the thread is interrupted while blocked
 */
@Override
public void close() {
    // Effectively "wait forever" for outstanding sends before closing.
    close(Duration.ofMillis(Long.MAX_VALUE));
}
// Closes the producer (if one was created) and then surfaces any asynchronous send
// failure that occurred earlier, so callers don't silently lose errors on shutdown.
@Override public void close() throws Exception {
    if (producer != null) {
        producer.close();
    }
    // make sure we propagate pending errors
    checkErroneous();
}
/**
 * Fails fast if an asynchronous send error has been recorded: logs the failure,
 * aborts the producer without waiting for in-flight requests, and rethrows the
 * cause wrapped in an {@link IOException}.
 *
 * @throws IOException if a send exception was recorded for this writer
 */
private void checkExceptions() throws IOException {
    // Read the pending failure exactly once so the logged and thrown exception are the
    // same instance (the original re-read the reference three times, which is racy).
    final Throwable cause = sendExceptionRef.get();
    if (cause != null) {
        // Pass the throwable as the SLF4J trailing argument so its stack trace is
        // logged — the original dropped the exception from the log entirely.
        LOG.error("Send Exception Aborting write from writerId [{}]", writerId, cause);
        // Zero timeout: abort immediately, do not wait for pending sends.
        producer.close(0, TimeUnit.MICROSECONDS);
        throw new IOException(cause);
    }
}
// Internal helper: shuts down the underlying producer.
private void closeProducer() {
    producer.close();
} }
// Anonymous-class override: delegates close() to the wrapped Kafka producer.
@Override public void close() {
    kafkaProducer.close();
} };
// Stops this sink: closes the Kafka producer, stops the sink counter, logs the final
// metrics snapshot, then delegates remaining lifecycle teardown to the superclass.
// synchronized to serialize stop() against concurrent lifecycle transitions.
@Override public synchronized void stop() {
    producer.close();
    counter.stop();
    logger.info("Kafka Sink {} stopped. Metrics: {}", getName(), counter);
    super.stop();
}
@Test
public void testOsDefaultSocketBufferSizes() {
    // The producer must construct and close cleanly when the OS-default
    // socket send/receive buffer sizes are requested via the sentinel value.
    Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ProducerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put(ProducerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    try (KafkaProducer<byte[], byte[]> producer =
            new KafkaProducer<>(config, new ByteArraySerializer(), new ByteArraySerializer())) {
        // no-op: successful construction followed by close() is the assertion here
    }
}
@Test
public void testSerializerClose() {
    // Verifies the producer configures (inits) both serializers on construction and
    // closes both exactly once on close().
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL);
    // Snapshot the counters first: MockSerializer's counters are static state shared
    // across tests, so assertions are on the deltas, not absolute values.
    final int oldInitCount = MockSerializer.INIT_COUNT.get();
    final int oldCloseCount = MockSerializer.CLOSE_COUNT.get();
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(
            configs, new MockSerializer(), new MockSerializer());
    // Construction must init both serializers (key + value) without closing them.
    assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get());
    assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get());
    producer.close();
    // close() must close both serializers and must not re-initialize them.
    assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get());
    assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get());
}
@Test
public void testConstructorWithSerializers() {
    // Constructing a producer with explicit serializer instances must succeed.
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    KafkaProducer<byte[], byte[]> producer =
            new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer());
    producer.close();
}
@Test
public void testMetricsReporterAutoGeneratedClientId() {
    // Verifies the auto-generated client id is propagated to configured metrics reporters.
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    KafkaProducer<String, String> producer = new KafkaProducer<>(
            props, new StringSerializer(), new StringSerializer());
    try {
        MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) producer.metrics.reporters().get(0);
        // Use the statically imported assertEquals for consistency with the sibling tests.
        assertEquals(producer.getClientId(), mockMetricsReporter.clientId);
    } finally {
        // Close in finally so the producer's background threads don't leak into
        // subsequent tests if the assertion above fails.
        producer.close();
    }
}
@Test
public void testInterceptorPartitionSetOnTooLargeRecord() {
    // A record rejected for exceeding max.request.size must still pass through the
    // interceptor chain: onSend() before the size check and onSendError() when it fails.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    // Force every record to be "too large" by setting the request-size cap to 1 byte.
    configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1");
    String topic = "topic";
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
    Metadata metadata = new Metadata(0, 90000, true);
    MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
    metadata.update(initialUpdateResponse, Time.SYSTEM.milliseconds());
    @SuppressWarnings("unchecked") // it is safe to suppress, since this is a mock class
    ProducerInterceptors<String, String> interceptors = mock(ProducerInterceptors.class);
    KafkaProducer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(),
            new StringSerializer(), metadata, null, interceptors, Time.SYSTEM);
    // Pass the record through unchanged so identity-based verify() calls below hold.
    when(interceptors.onSend(any())).then(invocation -> invocation.getArgument(0));
    producer.send(record);
    verify(interceptors).onSend(record);
    verify(interceptors).onSendError(eq(record), notNull(), notNull());
    // Zero timeout: the send already failed synchronously, so there is nothing to wait for.
    producer.close(Duration.ofMillis(0));
}
@Test
public void testInterceptorConstructClose() {
    // Verifies that a configured ProducerInterceptor is instantiated exactly once on
    // construction and closed exactly once on producer.close().
    try {
        Properties props = new Properties();
        // test with client ID assigned by KafkaProducer
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
        props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName());
        props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something");
        KafkaProducer<String, String> producer = new KafkaProducer<>(
                props, new StringSerializer(), new StringSerializer());
        // Construction must init the interceptor but not close it.
        assertEquals(1, MockProducerInterceptor.INIT_COUNT.get());
        assertEquals(0, MockProducerInterceptor.CLOSE_COUNT.get());
        // Cluster metadata will only be updated on calling onSend.
        Assert.assertNull(MockProducerInterceptor.CLUSTER_META.get());
        producer.close();
        // close() must close the interceptor exactly once, with no re-initialization.
        assertEquals(1, MockProducerInterceptor.INIT_COUNT.get());
        assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get());
    } finally {
        // cleanup since we are using mutable static variables in MockProducerInterceptor
        MockProducerInterceptor.resetCounters();
    }
}
@Test
public void testPartitionerClose() {
    // Verifies that a configured Partitioner is instantiated exactly once on
    // construction and closed exactly once on producer.close().
    try {
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
        // Reset up front: MockPartitioner's counters are static state shared across tests.
        MockPartitioner.resetCounters();
        props.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName());
        KafkaProducer<String, String> producer = new KafkaProducer<>(
                props, new StringSerializer(), new StringSerializer());
        // Construction must init the partitioner but not close it.
        assertEquals(1, MockPartitioner.INIT_COUNT.get());
        assertEquals(0, MockPartitioner.CLOSE_COUNT.get());
        producer.close();
        // close() must close the partitioner exactly once, with no re-initialization.
        assertEquals(1, MockPartitioner.INIT_COUNT.get());
        assertEquals(1, MockPartitioner.CLOSE_COUNT.get());
    } finally {
        // cleanup since we are using mutable static variables in MockPartitioner
        MockPartitioner.resetCounters();
    }
}