/**
 * Handles the DATA phase of an SMTP transaction: reads the message body into a new
 * FlowFile, enforces the configured maximum message size, attaches message attributes,
 * reports provenance, and routes the FlowFile to success.
 *
 * @param data the raw message content supplied by the SMTP server callback
 * @throws TooMuchDataException if the client sent more than {@code maxMessageSize} bytes
 * @throws RejectException if the message is rejected during processing
 * @throws IOException if reading or writing the content fails
 */
@Override public void data(final InputStream data) throws RejectException, TooMuchDataException, IOException {
    final ProcessSession processSession = sessionFactory.createSession();
    final StopWatch watch = new StopWatch();
    watch.start();
    try {
        FlowFile flowFile = processSession.create();
        // Flag set from inside the write callback so the size-limit check can be made
        // after the session write completes.
        final AtomicBoolean limitExceeded = new AtomicBoolean(false);
        flowFile = processSession.write(flowFile, (OutputStream out) -> {
            // LimitingInputStream stops returning bytes once maxMessageSize is reached,
            // so an oversized message is truncated here and rejected below.
            final LimitingInputStream lis = new LimitingInputStream(data, maxMessageSize);
            IOUtils.copy(lis, out);
            if (lis.hasReachedLimit()) { limitExceeded.set(true); }
        });
        if (limitExceeded.get()) {
            throw new TooMuchDataException("Maximum message size limit reached - client must send smaller messages");
        }
        flowFile = processSession.putAllAttributes(flowFile, extractMessageAttributes());
        watch.stop();
        // Provenance RECEIVE event records the SMTP endpoint and elapsed receive time.
        processSession.getProvenanceReporter().receive(flowFile, "smtp://" + host + ":" + port + "/", watch.getDuration(TimeUnit.MILLISECONDS));
        processSession.transfer(flowFile, ListenSMTP.REL_SUCCESS);
        processSession.commit();
    } catch (FlowFileAccessException | IllegalStateException | RejectException | IOException ex) {
        // Rethrow so the SMTP server reports a failure back to the client.
        log.error("Unable to fully process input due to " + ex.getMessage(), ex);
        throw ex;
    } finally {
        // Rolling back a session that has already been committed is a no-op,
        // so this only discards work when an exception prevented the commit.
        processSession.rollback(); //make sure this happens no matter what - is safe
    }
}
final StopWatch stopWatch = new StopWatch(true); final List<FileInfo> listing = transfer.getListing(); final long millis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);
long bytesReceived = transaction.getBytesSent(); StopWatch stopWatch = transaction.getStopWatch(); stopWatch.stop(); final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles"; final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived); final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS); final String dataSize = FormatUtils.formatDataSize(bytesReceived); logger.info("{} Successfully received {} ({}) from {} in {} milliseconds at a rate of {}", new Object[]{
/**
 * Loads a Maxmind database from the given file and swaps it in as the active reader,
 * recording the checksum of the file the reader was built from.
 *
 * @param dbFile the Maxmind database file to load
 * @param dbFileChecksum checksum of {@code dbFile}, stored alongside the new reader
 * @throws IOException if the database file cannot be read or parsed
 */
private void loadDatabase(final File dbFile, final String dbFileChecksum) throws IOException {
    final StopWatch loadTimer = new StopWatch(true);
    final DatabaseReader freshReader = new DatabaseReader.Builder(dbFile).build();
    loadTimer.stop();
    getLogger().info("Completed loading of Maxmind Database. Elapsed time was {} milliseconds.", new Object[]{loadTimer.getDuration(TimeUnit.MILLISECONDS)});
    // Publish the new reader and its checksum only after the load succeeded.
    databaseReader = freshReader;
    databaseChecksum = dbFileChecksum;
}
final StopWatch watch = new StopWatch(true); FlowFile sfFlowFile = session.write(flowFile, new StreamCallback() { new Object[]{sequenceFilename, watch.calculateDataRate(flowFile.getSize())}); return sfFlowFile;
final StopWatch stopWatch = new StopWatch(true); getLogger().debug("Loaded document for {} in {}", new Object[] {flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS)}); stopWatch.stop(); getLogger().debug("Successfully processed {} in {}", new Object[] {flowFile, stopWatch.getDuration(TimeUnit.MILLISECONDS)}); if(getLogger().isDebugEnabled()){ for (Entry<String, String> entry : attributes.entrySet()) {
flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), outputFilename); stopWatch.stop(); getLogger().info("Successfully received content from {} for {} in {}", new Object[] {qualifiedPath, flowFile, stopWatch.getDuration()}); session.getProvenanceReporter().fetch(flowFile, qualifiedPath.toString(), stopWatch.getDuration(TimeUnit.MILLISECONDS)); session.transfer(flowFile, REL_SUCCESS); } catch (final FileNotFoundException | AccessControlException e) {
/**
 * Creates an empty transaction with no session, no FlowFiles, and zero bytes sent;
 * the transaction's stop watch is started immediately.
 */
public FlowFileTransaction() {
    this(null, null, new StopWatch(true), 0, null, null);
}
/**
 * Writes the incoming FlowFile content into a Hadoop SequenceFile on the given output
 * stream, rewriting the file header so the declared value class is BytesWritable
 * instead of the InputStreamWritable used internally while writing.
 */
@Override
public void process(InputStream in, OutputStream out) throws IOException {
    // Use a FilterableOutputStream to change 'InputStreamWritable' to 'BytesWritable' - see comment
    // above for an explanation of why we want to do this.
    final ByteFilteringOutputStream bwos = new ByteFilteringOutputStream(out);
    // TODO: Adding this filter could be dangerous... A Sequence File's header contains 3 bytes: "SEQ",
    // followed by 1 byte that is the Sequence File version, followed by 2 "entries." These "entries"
    // contain the size of the Key/Value type and the Key/Value type. So, we will be writing the
    // value type as InputStreamWritable -- which we need to change to BytesWritable. This means that
    // we must also change the "size" that is written, but replacing this single byte could be
    // dangerous. However, we know exactly what will be written to the header, and we limit this at one
    // replacement, so we should be just fine.
    // First filter swaps the class-name bytes; each filter fires at most once.
    bwos.addFilter(toReplace, replaceWith, 1);
    // Second filter fixes the single length byte that precedes the class name.
    bwos.addFilter((byte) InputStreamWritable.class.getCanonicalName().length(),
            (byte) BytesWritable.class.getCanonicalName().length(), 1);
    try (final FSDataOutputStream fsDataOutputStream = new FSDataOutputStream(bwos, new Statistics(""));
            final SequenceFile.Writer writer = SequenceFile.createWriter(configuration,
                    SequenceFile.Writer.stream(fsDataOutputStream),
                    SequenceFile.Writer.keyClass(Text.class),
                    SequenceFile.Writer.valueClass(InputStreamWritable.class),
                    SequenceFile.Writer.compression(compressionType, compressionCodec))) {
        processInputStream(in, flowFile, writer);
    } finally {
        // Stop timing even if writing the SequenceFile failed.
        watch.stop();
    }
} });
/**
 * Transfers the FlowFile to the given relationship and emits a provenance RECEIVE
 * event whose transit URI identifies the Event Hub partition the data came from.
 *
 * @param relationship the relationship to route the FlowFile to
 * @param session the session owning the FlowFile
 * @param stopWatch running timer whose elapsed time is reported as the receive duration
 * @param eventHubName name of the source Event Hub
 * @param partitionId partition the events were consumed from
 * @param consumerGroup consumer group used to read the partition
 * @param flowFile the FlowFile to transfer
 */
private void transferTo(Relationship relationship, ProcessSession session, StopWatch stopWatch,
        String eventHubName, String partitionId, String consumerGroup, FlowFile flowFile) {
    session.transfer(flowFile, relationship);
    // amqps://<namespace>.servicebus.windows.net/<hub>/ConsumerGroups/<group>/Partitions/<partition>
    final StringBuilder transitUri = new StringBuilder("amqps://");
    transitUri.append(namespaceName).append(".servicebus.windows.net/")
            .append(eventHubName)
            .append("/ConsumerGroups/").append(consumerGroup)
            .append("/Partitions/").append(partitionId);
    session.getProvenanceReporter().receive(flowFile, transitUri.toString(), stopWatch.getElapsed(TimeUnit.MILLISECONDS));
}
/**
 * Creates a new stop watch.
 *
 * @param autoStart whether or not the timer should be started automatically
 */
public StopWatch(final boolean autoStart) {
    if (autoStart) {
        // begin timing immediately so callers can skip an explicit start()
        start();
    }
}
/**
 * Loads the configured Maxmind GeoIP database when the processor is scheduled and
 * publishes the resulting reader for use during {@code onTrigger}.
 *
 * @param context provides the GEO_DATABASE_FILE property pointing at the database file
 * @throws IOException if the database file cannot be read or parsed
 */
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    final File databaseFile = new File(context.getProperty(GEO_DATABASE_FILE).getValue());
    final StopWatch loadTimer = new StopWatch(true);
    final DatabaseReader loadedReader = new DatabaseReader.Builder(databaseFile).build();
    loadTimer.stop();
    getLogger().info("Completed loading of Maxmind Database. Elapsed time was {} milliseconds.", new Object[]{loadTimer.getDuration(TimeUnit.MILLISECONDS)});
    databaseReaderRef.set(loadedReader);
}
/**
 * Receives a batch of Event Hub messages, writes them into FlowFiles (record-oriented
 * when both a reader and writer factory are configured, raw otherwise), and commits
 * the NiFi session before checkpointing the Event Hub position.
 *
 * @param context partition context used for attribute extraction and checkpointing
 * @param messages the events delivered for this partition
 * @throws Exception if event processing fails in a way not handled below
 */
@Override public void onEvents(PartitionContext context, Iterable<EventData> messages) throws Exception {
    final ProcessSession session = processSessionFactory.createSession();
    try {
        final StopWatch stopWatch = new StopWatch(true);
        // Record-oriented output requires both a reader and a writer factory.
        if (readerFactory != null && writerFactory != null) {
            writeRecords(context, messages, session, stopWatch);
        } else {
            writeFlowFiles(context, messages, session, stopWatch);
        }
        // Commit NiFi first: if the checkpoint below fails, the messages are
        // redelivered and may be processed again (at-least-once semantics).
        session.commit();
        // If creating an Event Hub checkpoint failed, then the same message can be retrieved again.
        context.checkpoint();
    } catch (Exception e) {
        getLogger().error("Unable to fully process received message due to " + e, e);
        // Rolling back after a successful commit is a no-op, so FlowFiles that were
        // already committed are not rolled back here.
        session.rollback();
    }
}
/**
 * Creates a new stop watch.
 *
 * @param autoStart whether or not the timer should be started automatically
 */
public StopWatch(final boolean autoStart) {
    if (autoStart) {
        // begin timing immediately so callers can skip an explicit start()
        start();
    }
}
final StopWatch stopWatch = new StopWatch(false); try { stopWatch.start(); if (!hdfs.exists(file)) { continue; // If file is no longer here move on. context.yield(); } finally { stopWatch.stop(); long totalSize = 0; for (FlowFile flowFile : flowFiles) { final String dataRate = stopWatch.calculateDataRate(totalSize); final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS); logger.info("Created {} flowFiles from SequenceFile {}. Ingested in {} milliseconds at a rate of {}", new Object[]{ flowFiles.size(), file.toUri().toASCIIString(), millis, dataRate});
final StopWatch stopWatch = new StopWatch(true); addElement(map, prefix, key, value, attributes); stopWatch.stop(); getLogger().debug("Processed {} in {}", new Object[] {name, stopWatch.getDuration(TimeUnit.MILLISECONDS)});
final StopWatch stopWatch = new StopWatch(true); for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) { final Path sectionContainer = container.resolve(String.valueOf(i)); final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS); final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis; final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis - deleteExpiredMillis; final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis - sortRemainingMillis - deleteExpiredMillis; LOG.debug("Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms", containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis, deleteOldestMillis, cleanupMillis);
stopWatch.stop(); final String uploadDataRate = stopWatch.calculateDataRate(bytesSent); final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS); final String dataSize = FormatUtils.formatDataSize(bytesSent); logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[]{
final StopWatch stopWatch = new StopWatch(true); final FlowFile finalFlowFile = flowFile;
private int receiveFlowFiles(final Transaction transaction, final ProcessContext context, final ProcessSession session) throws IOException, ProtocolException { final String userDn = transaction.getCommunicant().getDistinguishedName(); final StopWatch stopWatch = new StopWatch(true); final Set<FlowFile> flowFilesReceived = new HashSet<>(); long bytesReceived = 0L; stopWatch.stop(); final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles"; final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived); final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS); final String dataSize = FormatUtils.formatDataSize(bytesReceived); logger.info("{} Successfully received {} ({}) from {} in {} milliseconds at a rate of {}", new Object[]{