addElement(map, prefix, key, value, attributes); stopWatch.stop(); getLogger().debug("Processed {} in {}", new Object[] {name, stopWatch.getDuration(TimeUnit.MILLISECONDS)});
/**
 * Loads the MaxMind GeoIP database from the configured file when the processor
 * is scheduled, timing the load and publishing the reader for onTrigger use.
 *
 * @param context the process context supplying the GEO_DATABASE_FILE property
 * @throws IOException if the database file cannot be opened or parsed
 */
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    final String dbPath = context.getProperty(GEO_DATABASE_FILE).getValue();
    final File databaseFile = new File(dbPath);

    // Time the load so slow startups are visible in the logs.
    final StopWatch loadTimer = new StopWatch(true);
    final DatabaseReader newReader = new DatabaseReader.Builder(databaseFile).build();
    loadTimer.stop();

    getLogger().info("Completed loading of Maxmind Database. Elapsed time was {} milliseconds.",
            new Object[]{loadTimer.getDuration(TimeUnit.MILLISECONDS)});

    // Swap the fully-built reader in atomically for concurrent onTrigger callers.
    databaseReaderRef.set(newReader);
}
try { response = dbReader.city(inetAddress); stopWatch.stop(); } catch (final IOException ex) {
try { response = dbReader.isp(inetAddress); stopWatch.stop(); } catch (final IOException ex) {
/**
 * Loads the MaxMind database from the given file and records the checksum it
 * was loaded from, so later triggers can detect whether the file has changed.
 *
 * @param dbFile         the MaxMind database file to load
 * @param dbFileChecksum checksum of {@code dbFile} at load time
 * @throws IOException if the database file cannot be opened or parsed
 */
private void loadDatabase(final File dbFile, final String dbFileChecksum) throws IOException {
    // Time the load so slow startups are visible in the logs.
    final StopWatch loadTimer = new StopWatch(true);
    final DatabaseReader freshReader = new DatabaseReader.Builder(dbFile).build();
    loadTimer.stop();

    getLogger().info("Completed loading of Maxmind Database. Elapsed time was {} milliseconds.",
            new Object[]{loadTimer.getDuration(TimeUnit.MILLISECONDS)});

    // Only publish the reader (and its checksum) after a fully successful build.
    databaseReader = freshReader;
    databaseChecksum = dbFileChecksum;
}
long bytesReceived = transaction.getBytesSent(); StopWatch stopWatch = transaction.getStopWatch(); stopWatch.stop(); final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles"; final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived);
/**
 * Streams the FlowFile content into a Hadoop SequenceFile, rewriting the header so the
 * value class reads as BytesWritable even though it is written via InputStreamWritable.
 */
@Override
public void process(InputStream in, OutputStream out) throws IOException {
    // Use a FilterableOutputStream to change 'InputStreamWritable' to 'BytesWritable' - see comment
    // above for an explanation of why we want to do this.
    final ByteFilteringOutputStream bwos = new ByteFilteringOutputStream(out);

    // TODO: Adding this filter could be dangerous... A Sequence File's header contains 3 bytes: "SEQ",
    // followed by 1 byte that is the Sequence File version, followed by 2 "entries." These "entries"
    // contain the size of the Key/Value type and the Key/Value type. So, we will be writing the
    // value type as InputStreamWritable -- which we need to change to BytesWritable. This means that
    // we must also change the "size" that is written, but replacing this single byte could be
    // dangerous. However, we know exactly what will be written to the header, and we limit this at one
    // replacement, so we should be just fine.
    bwos.addFilter(toReplace, replaceWith, 1);
    // Patch the one-byte class-name length in the header to match the substituted class name.
    bwos.addFilter((byte) InputStreamWritable.class.getCanonicalName().length(),
            (byte) BytesWritable.class.getCanonicalName().length(), 1);

    // try-with-resources closes the writer (flushing the SequenceFile) before the finally runs.
    try (final FSDataOutputStream fsDataOutputStream = new FSDataOutputStream(bwos, new Statistics(""));
            final SequenceFile.Writer writer = SequenceFile.createWriter(configuration,
                    SequenceFile.Writer.stream(fsDataOutputStream),
                    SequenceFile.Writer.keyClass(Text.class),
                    SequenceFile.Writer.valueClass(InputStreamWritable.class),
                    SequenceFile.Writer.compression(compressionType, compressionCodec))) {
        processInputStream(in, flowFile, writer);
    } finally {
        // Stop the timer even if writing failed, so the recorded duration is meaningful.
        watch.stop();
    }
} });
/**
 * Receives one SMTP message body: copies it into a new FlowFile (enforcing the configured
 * maximum message size), stamps message attributes, records a receive provenance event,
 * and transfers to REL_SUCCESS. The session is committed on success; the rollback in the
 * finally block is a no-op after a successful commit (see NiFi ProcessSession semantics)
 * and only takes effect if an exception escaped before commit.
 */
@Override
public void data(final InputStream data) throws RejectException, TooMuchDataException, IOException {
    final ProcessSession processSession = sessionFactory.createSession();
    final StopWatch watch = new StopWatch();
    watch.start();
    try {
        FlowFile flowFile = processSession.create();
        final AtomicBoolean limitExceeded = new AtomicBoolean(false);
        // Copy through a LimitingInputStream so an oversized message is detected
        // rather than buffered in full.
        flowFile = processSession.write(flowFile, (OutputStream out) -> {
            final LimitingInputStream lis = new LimitingInputStream(data, maxMessageSize);
            IOUtils.copy(lis, out);
            if (lis.hasReachedLimit()) {
                limitExceeded.set(true);
            }
        });
        if (limitExceeded.get()) {
            throw new TooMuchDataException("Maximum message size limit reached - client must send smaller messages");
        }
        flowFile = processSession.putAllAttributes(flowFile, extractMessageAttributes());
        watch.stop();
        // Provenance transit URI identifies the SMTP listener endpoint this message arrived on.
        processSession.getProvenanceReporter().receive(flowFile, "smtp://" + host + ":" + port + "/", watch.getDuration(TimeUnit.MILLISECONDS));
        processSession.transfer(flowFile, ListenSMTP.REL_SUCCESS);
        processSession.commit();
    } catch (FlowFileAccessException | IllegalStateException | RejectException | IOException ex) {
        // Rethrow so the SMTP server reports the failure to the client; rollback happens in finally.
        log.error("Unable to fully process input due to " + ex.getMessage(), ex);
        throw ex;
    } finally {
        processSession.rollback(); //make sure this happens no matter what - is safe
    }
}
stopWatch.stop(); final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles"; final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived);
return; timer.stop(); // NOTE(review): timer.stop() is unreachable after the return — either the stop should precede the return, or a closing brace was lost between the two statements; confirm against the full method
stopWatch.stop(); final String uploadDataRate = stopWatch.calculateDataRate(bytesSent); final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
stopWatch.stop(); final String uploadDataRate = stopWatch.calculateDataRate(bytesSent); final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
context.yield(); } finally { stopWatch.stop(); long totalSize = 0; for (FlowFile flowFile : flowFiles) {
flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), outputFilename); stopWatch.stop(); getLogger().info("Successfully received content from {} for {} in {}", new Object[] {qualifiedPath, flowFile, stopWatch.getDuration()}); session.getProvenanceReporter().fetch(flowFile, qualifiedPath.toString(), stopWatch.getDuration(TimeUnit.MILLISECONDS));
stopWatch.stop(); final String dataRate = stopWatch.calculateDataRate(flowFile.getSize()); final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
stopWatch.stop(); getLogger().debug("Successfully processed {} in {}", new Object[] {flowFile, stopWatch.getDuration(TimeUnit.MILLISECONDS)}); if(getLogger().isDebugEnabled()){
} finally { if (!error) { stopWatch.stop(); long duration = stopWatch.getDuration(TimeUnit.MILLISECONDS); String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName() + "." + cassandraTable;
@OnScheduled public void onScheduled(final ProcessContext context) throws IOException { getLogger().debug("Loading packages"); final StopWatch stopWatch = new StopWatch(true); // Load required MDHT packages System.setProperty( "org.eclipse.emf.ecore.EPackage.Registry.INSTANCE", "org.eclipse.emf.ecore.impl.EPackageRegistryImpl" ); CDAPackage.eINSTANCE.eClass(); HITSPPackage.eINSTANCE.eClass(); CCDPackage.eINSTANCE.eClass(); ConsolPackage.eINSTANCE.eClass(); IHEPackage.eINSTANCE.eClass(); stopWatch.stop(); getLogger().debug("Loaded packages in {}", new Object[] {stopWatch.getDuration(TimeUnit.MILLISECONDS)}); // Initialize JEXL jexl = new JexlBuilder().cache(1024).debug(false).silent(true).strict(false).create(); jexlCtx = new MapContext(); getLogger().debug("Loading mappings"); loadMappings(); // Load CDA mappings for parser }
timer.stop();
stopWatch.stop(); if (processingSummary.getFlowFilesCreated() > 0) { final float secs = (stopWatch.getDuration(TimeUnit.MILLISECONDS) / 1000F);