public static void processCoreNLPIfDoesNotExist(File processedFile, Properties coreNLPProps, String text) { if (!processedFile.exists()) { try { StanfordCoreNLP coreNLP = new StanfordCoreNLP(coreNLPProps); Annotation processedAnnotation = coreNLP.process(text); //this document holds the split for paragraphs. ProtobufAnnotationSerializer pas = new ProtobufAnnotationSerializer(true); OutputStream fos = new BufferedOutputStream(new FileOutputStream(processedFile.getAbsolutePath())); pas.write(processedAnnotation, fos); } catch (IOException e) { e.printStackTrace(); } } }
/**
 * Smoke-tests the quote-attribution pipeline: annotates the class-level {@code test}
 * text and prints the predicted mention and speaker (plus the sieve that predicted
 * each) for every extracted quote.
 *
 * @param familyFile     path to the family-words resource
 * @param animateFile    path to the animacy-words resource
 * @param genderFile     path to the gender-names resource
 * @param charactersFile path to the characters list
 * @param modelFile      path to the trained attribution model
 * @throws IOException            if a resource cannot be read
 * @throws ClassNotFoundException if the model cannot be deserialized
 */
public static void testPP(String familyFile, String animateFile, String genderFile, String charactersFile, String modelFile) throws IOException, ClassNotFoundException {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, depparse, quote, quoteattribution");
    props.setProperty("quoteattribution.familyWordsFile", familyFile);
    props.setProperty("quoteattribution.animacyWordsFile", animateFile);
    props.setProperty("quoteattribution.genderNamesFile", genderFile);
    props.setProperty("quoteattribution.charactersPath", charactersFile);
    props.setProperty("quoteattribution.modelPath", modelFile);

    StanfordCoreNLP coreNLP = new StanfordCoreNLP(props);
    Annotation processedAnnotation = coreNLP.process(test);

    for (CoreMap quote : processedAnnotation.get(CoreAnnotations.QuotationsAnnotation.class)) {
        System.out.println("Quote: " + quote.get(CoreAnnotations.TextAnnotation.class));

        // Report the mention prediction, or "none" when no sieve fired.
        if (quote.get(QuoteAttributionAnnotator.MentionAnnotation.class) == null) {
            System.out.println("Predicted Mention: none");
        } else {
            System.out.println("Predicted Mention: " + quote.get(QuoteAttributionAnnotator.MentionAnnotation.class)
                + " Predictor: " + quote.get(QuoteAttributionAnnotator.MentionSieveAnnotation.class));
        }

        // Same for the speaker prediction.
        if (quote.get(QuoteAttributionAnnotator.SpeakerAnnotation.class) == null) {
            System.out.println("Predicted Speaker: none");
        } else {
            System.out.println("Predicted Speaker: " + quote.get(QuoteAttributionAnnotator.SpeakerAnnotation.class)
                + " Predictor: " + quote.get(QuoteAttributionAnnotator.SpeakerSieveAnnotation.class));
        }

        System.out.println("====");
    }
    System.out.println("Finished");
}
// Fragment: annotate one input line and emit it in the configured output format.
// NOTE(review): `process`, `properties`, and `options` are defined in the enclosing
// class — not visible in this view.
Annotation anno = process(line);
outputAnnotation(System.out, anno, properties, options);
// Fragment: skip blank lines, tokenize/split the line, run the full pipeline over it,
// then iterate the resulting sentences (loop body continues outside this view —
// braces are intentionally unbalanced here).
line = line.trim();
if ( ! line.isEmpty()) {
    // `tokenizer` presumably is a tokenize/ssplit-only pipeline; `pipeline` adds the
    // remaining annotators — TODO confirm against the enclosing class.
    Annotation annotation = tokenizer.process(line);
    pipeline.annotate(annotation);
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
// Fragment/example: per-sentence sentiment analysis.
// NOTE: `<...>` marks elided code and is not valid Java as written.
String text = "I am feeling very sad and frustrated.";
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse, sentiment");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
<...>
Annotation annotation = pipeline.process(text);
List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
for (CoreMap sentence : sentences) {
    // Sentiment label attached per sentence by the `sentiment` annotator.
    String sentiment = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
    System.out.println(sentiment + "\t" + sentence);
}
Properties properties = new Properties(); properties.setProperty("annotators", "tokenize, ssplit, parse"); StanfordCoreNLP pipeline = new StanfordCoreNLP(properties); List<CoreMap> sentences = pipeline.process(SENTENCES) .get(CoreAnnotations.SentencesAnnotation.class); // I just gave a String constant which contains sentences. for (CoreMap sentence : sentences) { System.out.println(sentence.toString()); }
/**
 * Annotates {@code text} with the shared {@code pipeline} and returns the
 * sentence-level CoreMaps of the result.
 *
 * @param text raw input text
 * @return the annotated sentences
 */
private List<CoreMap> getAnnotations(String text) {
    return pipeline.process(text).get(CoreAnnotations.SentencesAnnotation.class);
}
/**
 * Runs an interactive shell where input text is processed with the given pipeline.
 * Type {@code q} (or send EOF) to quit.
 *
 * @param pipeline The pipeline to be used
 * @throws IOException If IO problem with stdin
 */
private static void shell(StanfordCoreNLP pipeline) throws IOException {
    BufferedReader is = new BufferedReader(new InputStreamReader(System.in));
    PrintWriter os = new PrintWriter(System.out);
    System.out.println("Entering interactive shell. Type q to quit.");
    while (true) {
        System.out.print("NLP> ");
        String line = is.readLine();
        // readLine() returns null at EOF; the original ignored null and looped
        // forever printing the prompt. Treat EOF like "q".
        if (line == null || line.equalsIgnoreCase("q")) {
            break;
        }
        if (!line.isEmpty()) {
            Annotation anno = pipeline.process(line);
            pipeline.prettyPrint(anno, os);
            os.flush(); // PrintWriter over System.out does not auto-flush
        }
    }
}
public static void processCoreNLPIfDoesNotExist(File processedFile, Properties coreNLPProps, String text) { if (!processedFile.exists()) { try { StanfordCoreNLP coreNLP = new StanfordCoreNLP(coreNLPProps); Annotation processedAnnotation = coreNLP.process(text); //this document holds the split for paragraphs. ProtobufAnnotationSerializer pas = new ProtobufAnnotationSerializer(true); OutputStream fos = new BufferedOutputStream(new FileOutputStream(processedFile.getAbsolutePath())); pas.write(processedAnnotation, fos); } catch (IOException e) { e.printStackTrace(); } } }
/**
 * Entry point: runs the sentiment pipeline over a fixed sample text and prints
 * each sentence's predicted sentiment class, tab-separated from the sentence.
 *
 * @param args unused
 * @throws IOException if pipeline resources cannot be loaded
 */
public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    String text = "This World is an amazing place";
    Annotation annotation = pipeline.process(text);
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        String label = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
        System.out.println(label + "\t" + sentence);
    }
}
/**
 * Minimal Stanford CoreNLP sentiment example: annotates a fixed sentence and
 * prints the predicted sentiment class for each sentence.
 */
public class SentimentAnalysis {
    public static void main(String[] args) throws IOException {
        String text = "I am very happy";
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse, sentiment");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
        Annotation annotation = pipeline.process(text);
        List<CoreMap> sentences = annotation
            .get(CoreAnnotations.SentencesAnnotation.class);
        for (CoreMap sentence : sentences) {
            // SentimentClass is the annotation key used by current CoreNLP (the
            // original's ClassName is the pre-rename key); this also matches the
            // other sentiment examples in this file.
            String sentiment = sentence
                .get(SentimentCoreAnnotations.SentimentClass.class);
            System.out.println(sentiment + "\t" + sentence);
        }
    }
}
// Fragment: run the enclosing class's process(...) over `text` (definition of
// `process` not visible in this view).
Annotation annotation = process(text);
// Fragment: annotate the line, then dispatch on the configured output format
// (switch body continues outside this view — braces intentionally unbalanced).
Annotation anno = pipeline.process(line);
switch (outputFormat) {
case XML:
// Fragment: annotate one input line and emit it in the configured output format.
// NOTE(review): `process`, `properties`, and `options` are defined in the enclosing
// class — not visible in this view.
Annotation anno = process(line);
outputAnnotation(System.out, anno, properties, options);
// Fragment/example: lemmatize each token with Morphology, using a POS tag supplied
// from outside. NOTE: `String tag = ...` is pseudo-code — the `...` is not valid Java.
Properties props = new Properties();
props.put("annotators", "tokenize, ssplit");
// NOTE(review): second constructor argument `false` — presumably disables
// requirement enforcement; confirm against the CoreNLP version in use.
StanfordCoreNLP pipeline = new StanfordCoreNLP(props, false);
String text = "painting";
Morphology morphology = new Morphology();
Annotation document = pipeline.process(text);
List<edu.stanford.nlp.util.CoreMap> sentences = document.get(SentencesAnnotation.class);
for(edu.stanford.nlp.util.CoreMap sentence: sentences) {
    for(CoreLabel token: sentence.get(TokensAnnotation.class)) {
        String word = token.get(TextAnnotation.class);
        String tag = ... //get the tag for the current word from somewhere, e.g. an array
        String lemma = morphology.lemma(word, tag);
        System.out.println("lemmatized version :" + lemma);
    }
}
// Fragment: guard against null/empty tweets, annotate, then iterate sentences
// (loop body and the use of `longest` continue outside this view — braces
// intentionally unbalanced).
if (tweet != null && tweet.length() > 0) {
    int longest = 0;
    Annotation annotation = pipeline.process(tweet);
    for (CoreMap sentence : annotation
        .get(CoreAnnotations.SentencesAnnotation.class)) {
// Fragment: tokenize/split the line, run the full pipeline over it, then iterate
// the sentences (loop body continues outside this view — brace intentionally
// unbalanced). `tokenizer` and `pipeline` are defined in the enclosing class.
Annotation annotation = tokenizer.process(line);
pipeline.annotate(annotation);
for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
// Fragment: skip blank lines, tokenize/split the line, run the full pipeline over it,
// then iterate the resulting sentences (loop body continues outside this view —
// braces are intentionally unbalanced here).
line = line.trim();
if ( ! line.isEmpty()) {
    // `tokenizer` presumably is a tokenize/ssplit-only pipeline; `pipeline` adds the
    // remaining annotators — TODO confirm against the enclosing class.
    Annotation annotation = tokenizer.process(line);
    pipeline.annotate(annotation);
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
// UIMA analysis-engine hook: runs the wrapped CoreNLP `processor` over the CAS
// document text. Method body continues outside this view — brace intentionally
// unbalanced.
@Override
public void process(JCas jCas) throws AnalysisEngineProcessException {
    Annotation document = this.processor.process(jCas.getDocumentText());