// Copies the bundle's Kafka coordinates (topic, partition, first offset and, for
// single-record FlowFiles, the key) onto the FlowFile and reports a provenance
// RECEIVE event against the Kafka transit URI.
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
    }
    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);
    tracker.updateFlowFile(newFlowFile);
}
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        // Add a record.count attribute to remain consistent with other record-oriented
        // processors. If not reading/writing records, use the "kafka.count" attribute.
        if (tracker.recordWriter == null) {
            kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
        } else {
            kafkaAttrs.put("record.count", String.valueOf(tracker.totalRecords));
        }
    }
    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);
    tracker.updateFlowFile(newFlowFile);
}
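For illustration, a bundle of three records consumed through a record writer would surface attributes along these lines; the attribute names come from the constants above, while the concrete values are invented:

// Hypothetical attribute set for a 3-record bundle written by a record writer:
//   kafka.topic     = "events"
//   kafka.partition = "0"
//   kafka.offset    = "42"   (offset of the first record in the bundle)
//   record.count    = "3"    (kafka.count would be used when no writer is set)
// kafka.key is omitted: it is only populated for single-record FlowFiles.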
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().forEach(partition -> {
        final List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // Find the highest offset consumed for this topic partition.
            final long maxOffset = messages.stream()
                .mapToLong(ConsumerRecord::offset)
                .max()
                .getAsLong();

            // Write the records to the content repository and session.
            if (demarcatorBytes != null) {
                writeDemarcatedData(getProcessSession(), messages, partition);
            } else if (readerFactory != null && writerFactory != null) {
                writeRecordData(getProcessSession(), messages, partition);
            } else {
                messages.forEach(message -> writeData(getProcessSession(), message, partition));
            }

            totalMessages += messages.size();
            // The offset to commit is one past the last consumed record.
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));
        }
    });
}
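A minimal sketch of the outer loop that would feed processRecords(); the kafkaConsumer field appears elsewhere in this listing, while the 10 ms poll timeout and the poison() error hook are assumptions, not part of the code above:

// Sketch only: drain one batch from the consumer and hand it to processRecords().
void poll() {
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        if (!records.isEmpty()) {
            processRecords(records);
        }
    } catch (final KafkaException e) {
        poison(); // hypothetical: mark this consumer lease unusable so it is not reused
        throw e;
    }
}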
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().forEach(partition -> {
        final List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // Find the highest offset consumed for this topic partition; the offset
            // to commit is one past the last consumed record.
            final long maxOffset = messages.stream()
                .mapToLong(ConsumerRecord::offset)
                .max()
                .getAsLong();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));

            // Write the records to the content repository and session: one FlowFile
            // per message when no demarcator is set, otherwise one bundled write.
            if (demarcatorBytes == null) {
                totalFlowFiles += messages.size();
                messages.forEach(message -> writeData(getProcessSession(), message, partition));
            } else {
                writeData(getProcessSession(), messages, partition);
            }
        }
    });
}
// Finalizes a bundle's record set, if any, and populates the Kafka attributes.
// Returns false when the bundle produced no records and was dropped.
private boolean processBundle(final BundleTracker bundle) throws IOException {
    final RecordSetWriter writer = bundle.recordWriter;
    if (writer != null) {
        final WriteResult writeResult;
        try {
            writeResult = writer.finishRecordSet();
        } finally {
            writer.close();
        }

        // Nothing was written: remove the empty FlowFile rather than transferring it.
        if (writeResult.getRecordCount() == 0) {
            getProcessSession().remove(bundle.flowFile);
            return false;
        }

        final Map<String, String> attributes = new HashMap<>(writeResult.getAttributes());
        attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());
        bundle.flowFile = getProcessSession().putAllAttributes(bundle.flowFile, attributes);
    }

    populateAttributes(bundle);
    return true;
}
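A hypothetical caller that finishes every tracked bundle and collects the surviving FlowFiles for the transfer step below; the finishBundles name and the bundleMap field are assumptions:

// Hypothetical: processBundle() already removed empty FlowFiles from the
// session and returned false for them, so only non-empty bundles are kept.
private List<FlowFile> finishBundles() throws IOException {
    final List<FlowFile> bundledFlowFiles = new ArrayList<>();
    for (final BundleTracker tracker : bundleMap.values()) {
        if (processBundle(tracker)) {
            bundledFlowFiles.add(tracker.flowFile);
        }
    }
    return bundledFlowFiles;
}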
// Transfer the bundled FlowFiles and commit the NiFi session before committing
// the Kafka offsets, so a failure between the two re-delivers records rather
// than losing them.
getProcessSession().transfer(bundledFlowFiles, REL_SUCCESS);
getProcessSession().commit();
kafkaConsumer.commitSync(uncommittedOffsetsMap);
resetInternalState();
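The lines above show only the happy path. A hedged sketch of how the surrounding method might handle a failed offset commit, with commit() and poison() as assumed names:

// Sketch only. If commitSync() fails after the session commit, the FlowFiles
// have already been transferred; Kafka re-delivers from the last committed
// offset on the next poll, giving at-least-once delivery.
boolean commit() {
    try {
        getProcessSession().transfer(bundledFlowFiles, REL_SUCCESS);
        getProcessSession().commit();
        kafkaConsumer.commitSync(uncommittedOffsetsMap);
        resetInternalState();
        return true;
    } catch (final KafkaException e) {
        poison(); // hypothetical: retire this lease so it is not polled again
        return false;
    }
}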