/** Forces any records buffered by the underlying Kafka producer to be sent. */
@Override
public void flush() {
    this.producer.flush();
}
/**
 * Pushes any buffered records out to the broker.
 *
 * @throws IOException declared by the overridden contract; propagated if the flush fails
 */
@Override
public void flush() throws IOException {
    producer.flush();
}
/** Flushes by forwarding straight to the wrapped delegate. */
@Override
public void flush() {
    this.delegate.flush();
}
/**
 * Idempotently shuts down the producer: flushes pending records, closes the
 * client, and clears the reference so later calls are no-ops.
 *
 * <p>A synchronized method is equivalent to the former {@code synchronized (this)}
 * block spanning the whole body.
 */
@Override
public synchronized void close() {
    if (producer == null) {
        // Already closed (or never opened) — nothing to do.
        return;
    }
    producer.flush();
    producer.close();
    producer = null;
}
/**
 * Serializes the given metrics to JSON and publishes them to the topic derived
 * from the profiler name. In synchronous mode the call blocks until the record
 * has been acknowledged by the broker.
 *
 * @param profilerName name used to resolve the destination topic
 * @param metrics      metric values to serialize and publish
 * @throws RuntimeException if the synchronous send fails or is interrupted
 */
@Override
public void report(String profilerName, Map<String, Object> metrics) {
    ensureProducer();
    String topicName = getTopic(profilerName);
    String str = JsonUtils.serialize(metrics);
    byte[] message = str.getBytes(StandardCharsets.UTF_8);
    Future<RecordMetadata> future = producer.send(
        new ProducerRecord<>(topicName, message));
    if (syncMode) {
        producer.flush();
        try {
            future.get();
        } catch (InterruptedException e) {
            // FIX: restore the interrupt status before rethrowing; the original
            // swallowed it, hiding the interruption from callers up the stack.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}
/**
 * Finishes this publish lease: flushes the producer, waits (bounded by
 * {@code maxAckWaitMillis}) for broker acknowledgements, and reports the outcome.
 * Completing an already-closed lease that sent no messages yields an empty result.
 *
 * @return the aggregated result of all sends made under this lease
 * @throws IllegalStateException if the lease was already closed after sending messages
 */
public PublishResult complete() {
    if (tracker == null) {
        if (messagesSent.get() == 0L) {
            // Nothing was ever published; trivially complete.
            return PublishResult.EMPTY;
        }
        throw new IllegalStateException("Cannot complete publishing to Kafka because Publisher Lease was already closed");
    }

    producer.flush();
    try {
        tracker.awaitCompletion(maxAckWaitMillis);
        return tracker.createPublishResult();
    } catch (final InterruptedException ie) {
        logger.warn("Interrupted while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        Thread.currentThread().interrupt();
        return tracker.failOutstanding(ie);
    } catch (final TimeoutException te) {
        logger.warn("Timed out while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        return tracker.failOutstanding(te);
    } finally {
        // The lease is single-use: drop the tracker regardless of outcome.
        tracker = null;
    }
}
/**
 * Completes this publish lease by flushing the producer and waiting up to
 * {@code maxAckWaitMillis} for every outstanding acknowledgement.
 *
 * @return the result describing which records succeeded or failed
 * @throws IllegalStateException if the lease was already closed
 */
public PublishResult complete() {
    if (tracker == null) {
        throw new IllegalStateException("Cannot complete publishing to Kafka because Publisher Lease was already closed");
    }

    producer.flush();
    try {
        tracker.awaitCompletion(maxAckWaitMillis);
        return tracker.createPublishResult();
    } catch (final InterruptedException ie) {
        logger.warn("Interrupted while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        // Preserve the interrupt for callers further up the stack.
        Thread.currentThread().interrupt();
        return tracker.failOutstanding(ie);
    } catch (final TimeoutException te) {
        logger.warn("Timed out while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        return tracker.failOutstanding(te);
    } finally {
        tracker = null; // single-use lease
    }
}
// NOTE(review): truncated fragment — the surrounding method and the remaining
// arguments of the LOG.debug(...) call are outside this excerpt. It flushes the
// producer and, when debug logging is enabled, logs sample counts; presumably
// the counts are supplied by the continuation lines — confirm against the full file.
_producer.flush(); if (LOG.isDebugEnabled()) { LOG.debug("Stored {} partition metric samples and {} broker metric samples to Kafka",
/**
 * Completes this transactional publish lease. Flushes the producer, commits the
 * active transaction (if any), then waits up to {@code maxAckWaitMillis} for all
 * acknowledgements and reports the outcome. Re-completing a closed lease rolls
 * back and fails unless nothing was ever sent.
 *
 * @return aggregated publish result for this lease
 * @throws IllegalStateException if the lease was already closed after sending messages
 */
public PublishResult complete() {
    if (tracker == null) {
        if (messagesSent.get() == 0L) {
            return PublishResult.EMPTY; // nothing sent, nothing to complete
        }
        rollback();
        throw new IllegalStateException("Cannot complete publishing to Kafka because Publisher Lease was already closed");
    }

    producer.flush();

    if (activeTransaction) {
        producer.commitTransaction();
        activeTransaction = false;
    }

    try {
        tracker.awaitCompletion(maxAckWaitMillis);
        return tracker.createPublishResult();
    } catch (final InterruptedException ie) {
        logger.warn("Interrupted while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        Thread.currentThread().interrupt();
        return tracker.failOutstanding(ie);
    } catch (final TimeoutException te) {
        logger.warn("Timed out while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        return tracker.failOutstanding(te);
    } finally {
        tracker = null; // lease is single-use
    }
}
/**
 * Finalizes the lease: flushes buffered records, commits any open transaction,
 * and blocks (up to {@code maxAckWaitMillis}) for broker acknowledgements.
 *
 * @return the publish result summarizing successes and failures
 * @throws IllegalStateException if called after the lease was closed with messages sent
 */
public PublishResult complete() {
    final boolean alreadyClosed = (tracker == null);
    if (alreadyClosed) {
        if (messagesSent.get() == 0L) {
            // No records were published under this lease.
            return PublishResult.EMPTY;
        }
        rollback();
        throw new IllegalStateException("Cannot complete publishing to Kafka because Publisher Lease was already closed");
    }

    producer.flush();
    if (activeTransaction) {
        // Commit exactly once; clear the flag so a retry cannot double-commit.
        producer.commitTransaction();
        activeTransaction = false;
    }

    try {
        tracker.awaitCompletion(maxAckWaitMillis);
        return tracker.createPublishResult();
    } catch (final InterruptedException e) {
        logger.warn("Interrupted while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        Thread.currentThread().interrupt();
        return tracker.failOutstanding(e);
    } catch (final TimeoutException e) {
        logger.warn("Timed out while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        return tracker.failOutstanding(e);
    } finally {
        tracker = null;
    }
}
/**
 * Closes out this publish lease. Guard clauses handle the already-closed case;
 * otherwise the producer is flushed, the open transaction (if any) committed,
 * and acknowledgements awaited for at most {@code maxAckWaitMillis}.
 *
 * @return the publish result for all records sent under this lease
 * @throws IllegalStateException if the lease was already closed after sending messages
 */
public PublishResult complete() {
    if (tracker == null && messagesSent.get() == 0L) {
        return PublishResult.EMPTY;
    }
    if (tracker == null) {
        rollback();
        throw new IllegalStateException("Cannot complete publishing to Kafka because Publisher Lease was already closed");
    }

    producer.flush();
    if (activeTransaction) {
        producer.commitTransaction();
        activeTransaction = false;
    }

    try {
        tracker.awaitCompletion(maxAckWaitMillis);
        return tracker.createPublishResult();
    } catch (final InterruptedException interrupted) {
        logger.warn("Interrupted while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        Thread.currentThread().interrupt();
        return tracker.failOutstanding(interrupted);
    } catch (final TimeoutException timedOut) {
        logger.warn("Timed out while waiting for an acknowledgement from Kafka; some FlowFiles may be transferred to 'failure' even though they were received by Kafka");
        return tracker.failOutstanding(timedOut);
    } finally {
        tracker = null;
    }
}
/** Flushes the wrapped delegate. */
@Override
public void flush() {
    delegate.flush();
}
/**
 * {@inheritDoc}
 * <p><b>Note</b> It only makes sense to invoke this method if the
 * {@link ProducerFactory} serves up a singleton producer (such as the
 * {@link DefaultKafkaProducerFactory}).
 */
@Override
public void flush() {
    final Producer<K, V> theProducer = getTheProducer();
    try {
        theProducer.flush();
    } finally {
        // Always release the producer, even if the flush throws.
        closeProducer(theProducer, inTransaction());
    }
}
/**
 * Synchronously writes the given records to a Kafka topic, blocking on each send
 * so records are acknowledged in order before the next is dispatched.
 *
 * @param topic Kafka topic to write the data records to
 * @param records Data records to write to Kafka
 * @param producerConfig Kafka producer configuration
 * @param <K> Key type of the data records
 * @param <V> Value type of the data records
 * @throws ExecutionException if a send fails on the broker side
 * @throws InterruptedException if interrupted while awaiting an acknowledgement
 */
public static <K, V> void produceKeyValuesSynchronously(
    String topic, Collection<KeyValue<K, V>> records, Properties producerConfig)
    throws ExecutionException, InterruptedException {
    // FIX: try-with-resources guarantees the producer is closed even when a
    // send/get throws — the original leaked the producer on any exception.
    try (Producer<K, V> producer = new KafkaProducer<>(producerConfig)) {
        for (KeyValue<K, V> record : records) {
            Future<RecordMetadata> f = producer.send(
                new ProducerRecord<>(topic, record.key, record.value));
            // Block per record to surface failures eagerly and preserve ordering.
            f.get();
        }
        producer.flush();
    }
}
/**
 * Ensures all pending writes have been handed to the broker by flushing the
 * underlying producer.
 */
public void flush() {
    this.producer.flush();
}
/**
 * Flushes the producer and then surfaces any asynchronous send failure that was
 * recorded by the delivery callback.
 */
@Override
public void flush() {
    log.debug("Flushing producer");
    producer.flush();
    // Re-throw any error captured by an earlier async send callback.
    checkForException();
}
/** Flushes and closes the producer if one was ever created; otherwise a no-op. */
@Override
public void close() {
    if (producer == null) {
        return;
    }
    producer.flush();
    producer.close();
}
}
/**
 * Publishes every statement in the collection to the configured topic, then
 * flushes so all records reach the broker before this method returns.
 *
 * @param statements statements to publish; must not be null
 * @throws RyaStreamsException declared by the overridden contract
 */
@Override
public void fromCollection(final Collection<VisibilityStatement> statements) throws RyaStreamsException {
    requireNonNull(statements);
    statements.forEach(statement ->
        producer.send(new ProducerRecord<>(topic, statement)));
    producer.flush();
}
}
/**
 * Tests invocation of methods on KafkaProducer using {@link KafkaSender#doOnProducer(java.util.function.Function)}
 */
@Test
public void producerMethods() {
    // Exercise read-only metadata accessors and flush through doOnProducer.
    testProducerMethod(producer -> assertEquals(0, producer.metrics().size()));
    testProducerMethod(producer -> assertEquals(2, producer.partitionsFor(topic).size()));
    testProducerMethod(producer -> producer.flush());
}