/**
 * Logs a full line reporting that the given operation is " done",
 * together with the elapsed time in seconds.
 *
 * @param msg Description of the operation that finished
 */
public void done(String msg) {
  StringBuilder line = new StringBuilder(msg);
  line.append(" done [").append(toSecondsString()).append(" sec].");
  log.info(line.toString());
}
/**
 * Completes a line started by doing(): logs "done" plus the elapsed
 * time formatted as x.y seconds.
 */
public void done() {
  String elapsed = toSecondsString();
  log.info(String.format("done [%s sec].", elapsed));
}
/**
 * Reports a timed operation through another class' logger, e.g.
 * {@code timing.done(logger, "Loading lexicon")}. Appends the elapsed-time
 * suffix to {@code msg} before logging it.
 *
 * @param logger Logger that the timed operation is reported through
 * @param msg Message describing the operation; mutated in place
 */
public void done(Redwood.RedwoodChannels logger, StringBuilder msg) {
  msg.append("... done [");
  msg.append(toSecondsString());
  msg.append(" sec].");
  logger.info(msg.toString());
}
/**
 * Appends the " done" suffix and elapsed seconds to {@code msg}, then
 * logs the resulting line with this class' logger.
 *
 * @param msg Message describing the operation; mutated in place
 */
public void done(StringBuilder msg) {
  String elapsed = toSecondsString();
  msg.append(" done [").append(elapsed).append(" sec].");
  log.info(msg.toString());
}
/**
 * Returns the elapsed time since the timer started, formatted as "d.d" seconds.
 *
 * @return Elapsed seconds as a String
 */
public String toSecondsString() {
  long elapsedMs = report();
  return toSecondsString(elapsedMs);
}
/**
 * Reports a timed operation through another class' logger, logging the
 * message followed by "... done" and the elapsed seconds.
 *
 * @param logger Logger that the timed operation is reported through
 * @param msg Message describing the operation
 */
public void done(Redwood.RedwoodChannels logger, String msg) {
  StringBuilder line = new StringBuilder(msg);
  line.append(" ... done [").append(toSecondsString()).append(" sec].");
  logger.info(line.toString());
}
/**
 * Returns detailed human-readable timing information: time spent by each
 * annotator and by the whole annotation pipeline. The result contains
 * newline characters but does not end with one, so it can be printed with
 * {@code println()}. Empty when timing is disabled ({@code TIME} is false).
 *
 * @return Human readable information on time spent in processing.
 */
public String timingInformation() {
  StringBuilder sb = new StringBuilder();
  if (TIME) {
    sb.append("Annotation pipeline timing information:").append(IOUtils.eolChar);
    // accumulatedTime is kept parallel to annotators, so walk both together.
    Iterator<MutableLong> times = accumulatedTime.iterator();
    long totalMs = 0;
    for (Annotator annotator : annotators) {
      long ms = times.next().longValue();
      totalMs += ms;
      sb.append(StringUtils.getShortClassName(annotator))
        .append(": ")
        .append(Timing.toSecondsString(ms))
        .append(" sec.")
        .append(IOUtils.eolChar);
    }
    sb.append("TOTAL: ").append(Timing.toSecondsString(totalMs)).append(" sec.");
  }
  return sb.toString();
}
/**
 * Returns the next scored parse list, pre-fetching its successor. When the
 * underlying source is exhausted, logs a summary, marks the iterator done,
 * and closes the reader if this iterator owns it.
 *
 * @return The next list of scored trees
 * @throws NoSuchElementException if the iterator is exhausted
 */
@Override
public List<ScoredObject<Tree>> next() {
  // Guard clause: fail fast once the source has been drained.
  if (done) {
    throw new NoSuchElementException("No more elements from " + inputDesc);
  }
  List<ScoredObject<Tree>> result = next;
  next = getNext();
  processed++;
  if (next == null) {
    logger.info("Read " + processed + " trees, from " + inputDesc + " in " + timing.toSecondsString() + " secs");
    done = true;
    if (closeBufferNeeded) {
      try {
        br.close();
      } catch (IOException ex) {
        logger.warn(ex);
      }
    }
  }
  return result;
}

} // end static class ScoredParsesIterator
/** * Helper method for printing out timing info after an annotation run * * @param pipeline the StanfordCoreNLP pipeline to log timing info for * @param tim the Timing object to log timing info */ private static void logTimingInfo(StanfordCoreNLP pipeline, Timing tim) { logger.info(""); // puts blank line in logging output logger.info(pipeline.timingInformation()); logger.info("Pipeline setup: " + Timing.toSecondsString(pipeline.pipelineSetupTime) + " sec."); logger.info("Total time for StanfordCoreNLP pipeline: " + Timing.toSecondsString(pipeline.pipelineSetupTime + tim.report()) + " sec."); }
/**
 * Deserializes an object from a URL, classpath resource, or file, and logs
 * a timed "... done" message through the supplied logger.
 *
 * @param log Logger to announce the load with
 * @param msg Message prefix describing what is being loaded
 * @param path URL, classpath resource, or filesystem path to read from
 * @return The deserialized object
 * @throws RuntimeIOException if the object cannot be read or its class is not found
 */
public static <T> T readObjectAnnouncingTimingFromURLOrClasspathOrFileSystem(Redwood.RedwoodChannels log, String msg, String path) {
  Timing timing = new Timing();
  try {
    T obj = IOUtils.readObjectFromURLOrClasspathOrFileSystem(path);
    log.info(msg + ' ' + path + " ... done [" + timing.toSecondsString() + " sec].");
    return obj;
  } catch (IOException | ClassNotFoundException e) {
    throw new RuntimeIOException(e);
  }
}
/**
 * Loads auxiliary training data. If {@code flags.unsupDropoutFile} is set,
 * reads it as unsupervised dropout data, labeling every token with the
 * background symbol, and stores the result in {@code unsupDocs}. If
 * {@code flags.doFeatureDiscovery} is also set, returns the supervised docs
 * with the unsupervised docs appended; otherwise returns {@code docs} unchanged.
 *
 * @param docs Supervised training documents
 * @param readerAndWriter Reader used to parse the unsupervised dropout file
 * @return Documents to use downstream (possibly including unsupervised docs)
 */
@Override
protected Collection<List<IN>> loadAuxiliaryData(Collection<List<IN>> docs, DocumentReaderAndWriter<IN> readerAndWriter) {
  if (flags.unsupDropoutFile != null) {
    log.info("Reading unsupervised dropout data from file: " + flags.unsupDropoutFile);
    Timing timer = new Timing();
    timer.start();
    unsupDocs = new ArrayList<>();
    ObjectBank<List<IN>> unsupObjBank = makeObjectBankFromFile(flags.unsupDropoutFile, readerAndWriter);
    for (List<IN> doc : unsupObjBank) {
      // Unsupervised data carries no labels: mark every token as background.
      for (IN tok : doc) {
        tok.set(CoreAnnotations.AnswerAnnotation.class, flags.backgroundSymbol);
        tok.set(CoreAnnotations.GoldAnswerAnnotation.class, flags.backgroundSymbol);
      }
      unsupDocs.add(doc);
    }
    long elapsedMs = timer.stop();
    // Fixed doubled colon in the original log message ("Time to read: : ").
    log.info("Time to read: " + Timing.toSecondsString(elapsedMs) + " seconds");
  }
  if (unsupDocs != null && flags.doFeatureDiscovery) {
    // Presize to avoid intermediate growth when concatenating both sets.
    List<List<IN>> totalDocs = new ArrayList<>(docs.size() + unsupDocs.size());
    totalDocs.addAll(docs);
    totalDocs.addAll(unsupDocs);
    return totalDocs;
  } else {
    return docs;
  }
}
/**
 * Builds the dropout objective function. When unsupervised documents were
 * loaded, they are first converted to feature-index form and handed to the
 * objective; otherwise the unsupervised data slot is left null.
 *
 * @param data Supervised training data in feature-index form
 * @param labels Gold label indices for the supervised data
 * @return The dropout-regularized CRF objective function
 */
@Override
protected CRFLogConditionalObjectiveFunction getObjectiveFunction(int[][][][] data, int[][] labels) {
  int[][][][] unsupDropoutData = null;
  if (unsupDocs != null) {
    Timing timer = new Timing();
    timer.start();
    List<Triple<int[][][], int[], double[][][]>> unsupDataAndLabels = documentsToDataAndLabelsList(unsupDocs);
    int numDocs = unsupDataAndLabels.size();
    unsupDropoutData = new int[numDocs][][][];
    // Only the data portion (first()) is needed; labels are unsupervised.
    for (int i = 0; i < numDocs; i++) {
      unsupDropoutData[i] = unsupDataAndLabels.get(i).first();
    }
    long elapsedMs = timer.stop();
    log.info("Time to read unsupervised dropout data: " + Timing.toSecondsString(elapsedMs) + " seconds, read " + unsupDropoutData.length + " files");
  }
  return new CRFLogConditionalObjectiveFunctionWithDropout(data, labels, windowSize, classIndex, labelIndices, map, flags.priorType, flags.backgroundSymbol, flags.sigma, null, flags.dropoutRate, flags.dropoutScale, flags.multiThreadGrad, flags.dropoutApprox, flags.unsupDropoutScale, unsupDropoutData);
}
/**
 * Exports features for the given documents to the file named by
 * {@code flags.exportFeatures}, logging how long the export took.
 * Does nothing when no export file is configured.
 *
 * @param docs Documents whose features should be exported
 */
@Override
public void dumpFeatures(Collection<List<IN>> docs) {
  // Guard clause: feature export is optional.
  if (flags.exportFeatures == null) {
    return;
  }
  Timing timer = new Timing();
  CRFFeatureExporter<IN> featureExporter = new CRFFeatureExporter<>(this);
  featureExporter.printFeatures(flags.exportFeatures, docs);
  long elapsedMs = timer.stop();
  log.info("Time to export features: " + Timing.toSecondsString(elapsedMs) + " seconds");
}
public static LexicalizedParser getParserFromSerializedFile(String serializedFileOrUrl) { try { Timing tim = new Timing(); ObjectInputStream in = IOUtils.readStreamFromString(serializedFileOrUrl); LexicalizedParser pd = loadModel(in); in.close(); log.info("Loading parser from serialized file " + serializedFileOrUrl + " ... done [" + tim.toSecondsString() + " sec]."); return pd; } catch (InvalidClassException ice) { // For this, it's not a good idea to continue and try it as a text file! throw new RuntimeException("Invalid class in file: " + serializedFileOrUrl, ice); } catch (FileNotFoundException fnfe) { // For this, it's not a good idea to continue and try it as a text file! throw new RuntimeException("File not found: " + serializedFileOrUrl, fnfe); } catch (StreamCorruptedException sce) { // suppress error message, on the assumption that we've really got // a text grammar, and that'll be tried next log.info("Attempting to load " + serializedFileOrUrl + " as a serialized grammar caused error below, but this may just be because it's a text grammar!"); log.info(sce); } catch (Exception e) { log.error(e); } return null; }
logger.info("Reading dataset from " + filename + " ... done [" + tim.toSecondsString() + "s, " + dataset.size() + " items]."); return new Pair<>(dataset, lineInfos);
+ numFeatures); log.info("numWeights: orig1=" + oldNumWeights1 + ", orig2=" + oldNumWeights2 + ", combined=" + numWeights); log.info("Time to combine CRFClassifier: " + Timing.toSecondsString(elapsedMs) + " seconds");
dg.readData(in); log.info("Loading parser from text file " + textFileOrUrl + " ... done [" + tim.toSecondsString() + " sec]."); return new LexicalizedParser(lex, bg, ug, dg, stateIndex, wordIndex, tagIndex, op); } catch (IOException e) {
System.out.println(ap.timingInformation()); log.info("Total time for AnnotationPipeline: " + tim.toSecondsString() + " sec.");
sayln(" Tuning completed in: " + Timing.toSecondsString(timer.report()) + " s"); return besteta;
sayln("Completed in: " + Timing.toSecondsString(total.report()) + " s");