/**
 * Copies the name finder's confidence score for each detected name onto the
 * corresponding annotation's probability feature. A no-op when no probability
 * feature is configured.
 *
 * @param detectedNames   spans produced by the name finder, parallel to {@code nameAnnotations}
 * @param nameAnnotations annotations to enrich with confidence values
 */
protected void postProcessAnnotations(Span[] detectedNames, AnnotationFS[] nameAnnotations) {
    if (probabilityFeature == null) {
        return;
    }
    double[] confidences = mNameFinder.probs(detectedNames);
    int index = 0;
    for (AnnotationFS annotation : nameAnnotations) {
        annotation.setDoubleValue(probabilityFeature, confidences[index]);
        index++;
    }
}
/**
 * Attaches a confidence score to each span, replacing every element of the
 * given array in place with a probability-carrying copy.
 *
 * @param spans the spans to enrich; modified in place when probabilities are available
 * @return the same array instance, possibly with elements rewrapped
 */
private Span[] setProbs(Span[] spans) {
    double[] confidences = probs(spans);
    if (confidences == null) {
        // No probabilities available; hand the spans back untouched.
        return spans;
    }
    for (int idx = 0; idx < confidences.length; idx++) {
        spans[idx] = new Span(spans[idx], confidences[idx]);
    }
    return spans;
}
/**
 * Runs the name finder over the given tokens and records each span's
 * confidence in the document-level confidence accumulator.
 *
 * @param cas    the CAS being processed (unused here; kept for the interface)
 * @param tokens sentence tokens to scan for names
 * @return the spans of the detected names
 */
protected Span[] find(CAS cas, String[] tokens) {
    Span[] detected = mNameFinder.find(tokens);
    // Collect per-name confidences so an aggregate document score can be derived later.
    for (double confidence : mNameFinder.probs()) {
        documentConfidence.add(confidence);
    }
    return detected;
}
/**
 * Copies the name finder's confidence for each detected name onto the matching
 * annotation's probability feature. Does nothing when no probability feature
 * is configured. Assumes detectedNames and nameAnnotations are parallel arrays —
 * TODO confirm against the caller.
 *
 * @param detectedNames   spans produced by the name finder
 * @param nameAnnotations annotations to receive the confidence values
 */
protected void postProcessAnnotations(Span[] detectedNames, AnnotationFS[] nameAnnotations) {
    if (probabilityFeature != null) {
        double[] probs = mNameFinder.probs(detectedNames);
        for (int i = 0; i < nameAnnotations.length; i++) {
            nameAnnotations[i].setDoubleValue(probabilityFeature, probs[i]);
        }
    }
}
/**
 * Attaches a confidence score to each span by replacing every array element
 * in place with a probability-carrying copy.
 *
 * @param spans the spans to enrich; modified in place when probabilities are available
 * @return the same array instance, possibly with elements rewrapped
 */
private Span[] setProbs(Span[] spans) {
    double[] probs = probs(spans);
    // probs(...) may return null, in which case the spans are returned unmodified.
    if (probs != null) {
        for (int i = 0; i < probs.length; i++) {
            double prob = probs[i];
            spans[i] = new Span(spans[i], prob);
        }
    }
    return spans;
}
/**
 * Rewraps each span with its confidence score so downstream consumers can
 * read the probability directly from the span.
 *
 * @param spans spans to annotate with probabilities; mutated in place
 * @return the input array, with elements replaced when probabilities exist
 */
private Span[] setProbs(Span[] spans) {
    double[] probs = probs(spans);
    // A null result from probs(...) means no scores are available; skip rewrapping.
    if (probs != null) {
        for (int i = 0; i < probs.length; i++) {
            double prob = probs[i];
            spans[i] = new Span(spans[i], prob);
        }
    }
    return spans;
}
/**
 * Detects names in the given tokens and accumulates each detection's
 * confidence into the document-level confidence list.
 *
 * @param cas    the CAS being processed (not used by this implementation)
 * @param tokens sentence tokens to scan
 * @return spans of the detected names
 */
protected Span[] find(CAS cas, String[] tokens) {
    Span[] names = mNameFinder.find(tokens);
    // probs() returns the confidences of the most recent find() call.
    double[] probs = mNameFinder.probs();
    for (double prob : probs) {
        documentConfidence.add(prob);
    }
    return names;
}
/**
 * Runs every configured name-finder model over the tokenized content and
 * returns the detected named entities grouped by entity type.
 *
 * @param content raw text to tokenize and scan for named entities
 * @return map from entity type to the set of entity surface strings found
 */
public Map<String, Set<String>> tokenize(String content) {
    Map<String, Set<String>> namedEntities = Maps.newHashMap();
    // Diamond operator instead of the redundant explicit type argument.
    List<TextAnnotation> allTextAnnotations = new ArrayList<>();
    String[] tokens = SimpleTokenizer.INSTANCE.tokenize(content);
    for (Map.Entry<String, TokenNameFinderModel> finderEntry : finders.entrySet()) {
        String type = finderEntry.getKey();
        NameFinderME finder = new NameFinderME(finderEntry.getValue());
        Span[] spans = finder.find(tokens);
        double[] probs = finder.probs(spans);
        for (int ni = 0; ni < spans.length; ni++) {
            allTextAnnotations.add(new TextAnnotation(type, spans[ni], probs[ni]));
        }
    }
    // Different finders can produce overlapping spans; resolve conflicts before conversion.
    // isEmpty() is the idiomatic emptiness check (was: size() > 0).
    if (!allTextAnnotations.isEmpty()) {
        removeConflicts(allTextAnnotations);
    }
    convertTextAnnotationsToNamedEntities(tokens, allTextAnnotations, namedEntities);
    return namedEntities;
}
double[] probs = personFinder.probs(); String[] names = Span.spansToStrings(find, tokens); for (int i = 0; i < names.length; i++) {
double[] probs = finder.probs();
final double[] spanProbs = model.getNameFinder().probs(nameSpans);
final double[] spanProbs = model.getNameFinder().probs(nameSpans);
String[] tokens = Span.spansToStrings(tokenSpans, sentence); Span[] nameSpans = finder.find(tokens); double[] probs = finder.probs();
/**
 * Annotates each sentence of the document with OpenNLP named-entity spans,
 * attaching the entity type and the finder's confidence to every annotation.
 *
 * @param document the document whose sentences are scanned for named entities
 */
@Override
public void annotate(Document document) {
    Collection<TokenNameFinderModel> models = loadModels(document.getLanguage());
    for (Annotation sentence : document.sentences()) {
        List<Annotation> tokenList = sentence.tokens();
        String[] tokens = tokenList.stream().map(Object::toString).toArray(String[]::new);
        for (TokenNameFinderModel model : models) {
            NameFinderME finder = new NameFinderME(model);
            opennlp.tools.util.Span[] spans = finder.find(tokens);
            double[] probs = finder.probs(spans);
            for (int i = 0; i < spans.length; i++) {
                opennlp.tools.util.Span span = spans[i];
                document.annotationBuilder()
                        .type(OPENNLP_ENTITY)
                        // Span token indices are [start, end), hence end - 1 for the last token.
                        .bounds(tokenList.get(span.getStart()).union(tokenList.get(span.getEnd() - 1)))
                        // Locale.ROOT makes the upper-casing locale-independent; the bare
                        // toUpperCase() would corrupt type keys under e.g. the Turkish locale.
                        .attribute(Types.ENTITY_TYPE,
                                EntityType.create(span.getType().toUpperCase(java.util.Locale.ROOT)))
                        .attribute(Types.CONFIDENCE, probs[i])
                        .createAttached();
            }
        }
    }
}
probabilities = ((NameFinderME) nameFinder).probs(reducedNames); } else {