/**
 * Classify one document in a worker thread, then bump and optionally log the
 * shared completion counter.
 *
 * @param doc The document (a List of IN) to classify.
 * @return The classified document returned by {@code classify(List)}.
 */
@Override
public List<IN> process(List<IN> doc) {
  // classify() may hand back a different list; keep its result, not the input
  doc = classify(doc);
  final int numDone = threadCompletionCounter.incrementAndGet();
  if (flags.verboseMode) {
    log.info(numDone + " examples completed");
  }
  return doc;
}

@Override
/** * Classify the tokens in an ObjectBank. * * @param documents The documents in an ObjectBank to classify. * @return {@link List} of classified sentences (each a List of something that * extends {@link CoreMap}). */ private List<List<IN>> classifyObjectBank(ObjectBank<List<IN>> documents) { List<List<IN>> result = new ArrayList<>(); for (List<IN> document : documents) { classify(document); List<IN> sentence = new ArrayList<>(); for (IN wi : document) { // TaggedWord word = new TaggedWord(wi.word(), wi.answer()); // sentence.add(word); sentence.add(wi); } result.add(sentence); } return result; }
/**
 * Segment the given text: classify each document produced by the
 * reader/writer, print its answers to an in-memory buffer, and split the
 * buffered output on whitespace.
 *
 * @param sentence Text to segment.
 * @param readerAndWriter Reader/writer used to build documents and print answers.
 * @return The whitespace-separated segments of the printed output.
 */
public List<String> segmentString(String sentence, DocumentReaderAndWriter<IN> readerAndWriter) {
  ObjectBank<List<IN>> docs = makeObjectBankFromString(sentence, readerAndWriter);
  StringWriter buffer = new StringWriter();
  try (PrintWriter out = new PrintWriter(buffer)) {
    for (List<IN> doc : docs) {
      classify(doc);
      readerAndWriter.printAnswers(doc, out);
      out.println();
    }
  }
  return Arrays.asList(buffer.toString().split("\\s"));
}
/**
 * Classify a List of IN. This method returns a new list of tokens, not the
 * list of tokens passed in, and runs the new tokens through
 * ObjectBankWrapper. (Both these behaviors are different from those of the
 * {@code classify(List)} method.)
 *
 * @param tokenSequence The List of IN to be classified.
 * @return The classified List of IN, where the classifier output for each
 *         token is stored in its
 *         {@link edu.stanford.nlp.ling.CoreAnnotations.AnswerAnnotation}
 *         field.
 */
public List<IN> classifySentence(List<? extends HasWord> tokenSequence) {
  final List<IN> tokens = preprocessTokens(tokenSequence);
  classify(tokens);
  return tokens;
}
List<IN> docOutput = classify(doc); if (textDocumentReaderAndWriter instanceof PlainTextDocumentReaderAndWriter) {
copyL = nsc.classify(copyL);
Triple<String, Integer, Integer> prevEntity = null; classify(doc);
/**
 * Classify one document in a worker thread, then bump and optionally log the
 * shared completion counter.
 *
 * @param doc The document (a List of IN) to classify.
 * @return The classified document returned by {@code classify(List)}.
 */
@Override
public List<IN> process(List<IN> doc) {
  // classify() may hand back a different list; keep its result, not the input
  doc = classify(doc);
  int completedNo = threadCompletionCounter.incrementAndGet();
  if (flags.verboseMode) {
    // Fix: use the class logger instead of System.err.println, matching the
    // identical process() implementations elsewhere in this file.
    log.info(completedNo + " examples completed");
  }
  return doc;
}
/**
 * Classify one document in a worker thread, then bump and optionally log the
 * shared completion counter.
 *
 * @param doc The document (a List of IN) to classify.
 * @return The classified document returned by {@code classify(List)}.
 */
@Override
public List<IN> process(List<IN> doc) {
  // keep classify()'s result: it may return a list other than the input
  doc = classify(doc);
  final int numDone = threadCompletionCounter.incrementAndGet();
  if (flags.verboseMode) {
    log.info(numDone + " examples completed");
  }
  return doc;
}

@Override
/** * Classify the tokens in an ObjectBank. * * @param documents The documents in an ObjectBank to classify. * @return {@link List} of classified sentences (each a List of something that * extends {@link CoreMap}). */ private List<List<IN>> classifyObjectBank(ObjectBank<List<IN>> documents) { List<List<IN>> result = new ArrayList<>(); for (List<IN> document : documents) { classify(document); List<IN> sentence = new ArrayList<>(); for (IN wi : document) { // TaggedWord word = new TaggedWord(wi.word(), wi.answer()); // sentence.add(word); sentence.add(wi); } result.add(sentence); } return result; }
/**
 * Segment the given text: classify each document produced by the
 * reader/writer, print its answers to an in-memory buffer, and split the
 * buffered output on whitespace.
 *
 * @param sentence Text to segment.
 * @param readerAndWriter Reader/writer used to build documents and print answers.
 * @return The whitespace-separated segments of the printed output.
 */
public List<String> segmentString(String sentence, DocumentReaderAndWriter<IN> readerAndWriter) {
  ObjectBank<List<IN>> docs = makeObjectBankFromString(sentence, readerAndWriter);
  StringWriter buffer = new StringWriter();
  try (PrintWriter out = new PrintWriter(buffer)) {
    for (List<IN> doc : docs) {
      classify(doc);
      readerAndWriter.printAnswers(doc, out);
      out.println();
    }
  }
  return Arrays.asList(buffer.toString().split("\\s"));
}
/**
 * Classify a List of IN. This method returns a new list of tokens, not the
 * list of tokens passed in, and runs the new tokens through
 * ObjectBankWrapper. (Both these behaviors are different from those of the
 * {@code classify(List)} method.)
 *
 * @param tokenSequence The List of IN to be classified.
 * @return The classified List of IN, where the classifier output for each
 *         token is stored in its
 *         {@link edu.stanford.nlp.ling.CoreAnnotations.AnswerAnnotation}
 *         field.
 */
public List<IN> classifySentence(List<? extends HasWord> tokenSequence) {
  final List<IN> tokens = preprocessTokens(tokenSequence);
  classify(tokens);
  return tokens;
}
/**
 * Segment the given text: classify each document produced by the
 * reader/writer, print its answers to an in-memory buffer, and split the
 * buffered output on whitespace.
 *
 * @param sentence Text to segment.
 * @param readerAndWriter Reader/writer used to build documents and print answers.
 * @return The whitespace-separated segments of the printed output.
 */
public List<String> segmentString(String sentence, DocumentReaderAndWriter<IN> readerAndWriter) {
  ObjectBank<List<IN>> docs = makeObjectBankFromString(sentence, readerAndWriter);
  StringWriter buffer = new StringWriter();
  try (PrintWriter out = new PrintWriter(buffer)) {
    for (List<IN> doc : docs) {
      classify(doc);
      readerAndWriter.printAnswers(doc, out);
      out.println();
    }
  }
  return Arrays.asList(buffer.toString().split("\\s"));
}
/**
 * Segment the given text: classify each document produced by the
 * reader/writer, print its answers to an in-memory buffer, and split the
 * buffered output on whitespace.
 *
 * @param sentence Text to segment.
 * @param readerAndWriter Reader/writer used to build documents and print answers.
 * @return The whitespace-separated segments of the printed output.
 */
public List<String> segmentString(String sentence, DocumentReaderAndWriter<IN> readerAndWriter) {
  ObjectBank<List<IN>> docs = makeObjectBankFromString(sentence, readerAndWriter);
  StringWriter buffer = new StringWriter();
  try (PrintWriter out = new PrintWriter(buffer)) {
    for (List<IN> doc : docs) {
      classify(doc);
      readerAndWriter.printAnswers(doc, out);
      out.println();
    }
  }
  return Arrays.asList(buffer.toString().split("\\s"));
}
/** * Classify the tokens in a String. Each sentence becomes a separate * document. Doesn't override default readerAndWriter. * * @param str * A String with tokens in one or more sentences of text to be * classified. * @return {@link List} of classified sentences (each a List of something * that extends {@link CoreMap}). */ public List<List<IN>> classifyRaw(String str, DocumentReaderAndWriter<IN> readerAndWriter) { ObjectBank<List<IN>> documents = makeObjectBankFromString(str, readerAndWriter); List<List<IN>> result = new ArrayList<List<IN>>(); for (List<IN> document : documents) { classify(document); List<IN> sentence = new ArrayList<IN>(); for (IN wi : document) { // TaggedWord word = new TaggedWord(wi.word(), wi.answer()); // sentence.add(word); sentence.add(wi); } result.add(sentence); } return result; }
/** * Classify the contents of a file. * * @param filename * Contains the sentence(s) to be classified. * @return {@link List} of classified List of IN. */ public List<List<IN>> classifyFile(String filename) { ObjectBank<List<IN>> documents = makeObjectBankFromFile(filename, plainTextReaderAndWriter); List<List<IN>> result = new ArrayList<List<IN>>(); for (List<IN> document : documents) { // System.err.println(document); classify(document); List<IN> sentence = new ArrayList<IN>(); for (IN wi : document) { sentence.add(wi); // System.err.println(wi); } result.add(sentence); } return result; }
/** * Classify the tokens in a String. Each sentence becomes a separate * document. * * @param str * A String with tokens in one or more sentences of text to be * classified. * @return {@link List} of classified sentences (each a List of something * that extends {@link CoreMap}). */ public List<List<IN>> classify(String str) { ObjectBank<List<IN>> documents = makeObjectBankFromString(str, plainTextReaderAndWriter); List<List<IN>> result = new ArrayList<List<IN>>(); for (List<IN> document : documents) { classify(document); List<IN> sentence = new ArrayList<IN>(); for (IN wi : document) { // TaggedWord word = new TaggedWord(wi.word(), wi.answer()); // sentence.add(word); sentence.add(wi); } result.add(sentence); } return result; }
/** * Classify the contents of a file. * * @param filename * Contains the sentence(s) to be classified. * @return {@link List} of classified List of IN. */ public List<List<IN>> classifyFile(String filename) { DocumentReaderAndWriter<IN> readerAndWriter = new PlainTextDocumentReaderAndWriter<IN>(); readerAndWriter.init(flags); ObjectBank<List<IN>> documents = makeObjectBankFromFile(filename, readerAndWriter); List<List<IN>> result = new ArrayList<List<IN>>(); for (List<IN> document : documents) { // System.err.println(document); classify(document); List<IN> sentence = new ArrayList<IN>(); for (IN wi : document) { sentence.add(wi); // System.err.println(wi); } result.add(sentence); } return result; }
/** * Classify the tokens in a String. Each sentence becomes a separate document. * * @param str * A String with tokens in one or more sentences of text to be * classified. * @return {@link List} of classified sentences (each a List of something that * extends {@link CoreMap}). */ public List<List<IN>> classify(String str) { DocumentReaderAndWriter<IN> readerAndWriter = new PlainTextDocumentReaderAndWriter<IN>(); readerAndWriter.init(flags); ObjectBank<List<IN>> documents = makeObjectBankFromString(str, readerAndWriter); List<List<IN>> result = new ArrayList<List<IN>>(); for (List<IN> document : documents) { classify(document); List<IN> sentence = new ArrayList<IN>(); for (IN wi : document) { // TaggedWord word = new TaggedWord(wi.word(), wi.answer()); // sentence.add(word); sentence.add(wi); } result.add(sentence); } return result; }