/**
 * Creates a {@link PublisherPool} configured from the processor's context:
 * the common Kafka properties, byte-array key/value serializers, the
 * configured maximum request size, and the acknowledgement wait time.
 *
 * @param context supplies {@code MAX_REQUEST_SIZE}, {@code ACK_WAIT_TIME}
 *                and the common Kafka producer properties
 * @return a new {@link PublisherPool} backed by the assembled properties
 */
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // Use the ProducerConfig constant rather than the raw "max.request.size"
    // literal, for consistency with the serializer keys above (same key value).
    kafkaProperties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis);
}
// Distinguish a partial publish failure (some records acknowledged by Kafka
// before the error) from a total one, so the log message is accurate.
final int successCount = publishResult.getSuccessfulMessageCount(failure);
if (successCount > 0) {
    // Partial failure: some messages for this FlowFile were acknowledged.
    getLogger().error("Failed to send some messages for {} to Kafka, but {} messages were acknowledged by Kafka. Routing to failure due to {}",
        new Object[] {failure, successCount, publishResult.getReasonForFailure(failure)});
} else {
    // Total failure: no messages acknowledged. NOTE(review): this excerpt ends
    // mid-block; the closing braces live outside the visible source.
    getLogger().error("Failed to send all message for {} to Kafka; routing to failure due to {}",
        new Object[] {failure, publishResult.getReasonForFailure(failure)});
/**
 * Creates a {@link PublisherPool} configured from the processor's context:
 * the common Kafka properties, byte-array key/value serializers, the
 * configured maximum request size, and the acknowledgement wait time.
 *
 * @param context supplies {@code MAX_REQUEST_SIZE}, {@code ACK_WAIT_TIME}
 *                and the common Kafka producer properties
 * @return a new {@link PublisherPool} backed by the assembled properties
 */
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // Use the ProducerConfig constant rather than the raw "max.request.size"
    // literal, for consistency with the serializer keys above (same key value).
    kafkaProperties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis);
}
/**
 * Builds an instance of {@link KafkaPublisher} from the processor
 * configuration, forcing byte-array serialization for keys and values.
 * As a side effect, captures the configured bootstrap servers into
 * {@code this.brokers}.
 */
@Override
protected KafkaPublisher buildKafkaResource(ProcessContext context, ProcessSession session) {
    // Start from the common Kafka properties, then pin both serializers.
    final Properties props = this.buildKafkaProperties(context);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    this.brokers = context.getProperty(BOOTSTRAP_SERVERS).evaluateAttributeExpressions().getValue();
    return new KafkaPublisher(props, this.getLogger());
}
/**
 * Will rendezvous with Kafka if the {@link ProcessSession} contains a
 * {@link FlowFile}, producing a result {@link FlowFile}.
 * <br>
 * A successful result is transferred to {@link #REL_SUCCESS} with a SEND
 * provenance event; a failed result is penalized and transferred to
 * {@link #REL_FAILURE}.
 *
 * @return {@code true} if a FlowFile was obtained from the session and
 *         processed; {@code false} otherwise
 */
@Override
protected boolean rendezvousWithKafka(ProcessContext context, ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        // Nothing queued; signal the caller that no work was done.
        return false;
    }

    final long startNanos = System.nanoTime();
    flowFile = this.doRendezvousWithKafka(flowFile, context, session);

    Relationship destination = REL_SUCCESS;
    if (this.isFailedFlowFile(flowFile)) {
        destination = REL_FAILURE;
        flowFile = session.penalize(flowFile);
    } else {
        // Record provenance and log how many messages went out and how long it took.
        final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions(flowFile).getValue();
        final long executionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        final String transitUri = this.buildTransitURI(context.getProperty(SECURITY_PROTOCOL).getValue(), this.brokers, topic);
        session.getProvenanceReporter().send(flowFile, transitUri,
            "Sent " + flowFile.getAttribute(MSG_COUNT) + " Kafka messages", executionMillis);
        this.getLogger().info("Successfully sent {} to Kafka as {} message(s) in {} millis",
            new Object[] { flowFile, flowFile.getAttribute(MSG_COUNT), executionMillis });
    }
    session.transfer(flowFile, destination);
    return flowFile != null;
}
// Distinguish a partial publish failure (some records acknowledged by Kafka
// before the error) from a total one, so the log message is accurate.
final int successCount = publishResult.getSuccessfulMessageCount(failure);
if (successCount > 0) {
    // Partial failure: some messages for this FlowFile were acknowledged.
    getLogger().error("Failed to send some messages for {} to Kafka, but {} messages were acknowledged by Kafka. Routing to failure due to {}",
        new Object[] {failure, successCount, publishResult.getReasonForFailure(failure)});
} else {
    // Total failure: no messages acknowledged. NOTE(review): this excerpt ends
    // mid-block; the closing braces live outside the visible source.
    getLogger().error("Failed to send all message for {} to Kafka; routing to failure due to {}",
        new Object[] {failure, publishResult.getReasonForFailure(failure)});