/**
 * Tags the given sequence of tokens, using additional context supplied by
 * the caller, and returns the best outcome for each token.
 *
 * @param sentence the tokens of the sequence to tag
 * @param additionaContext extra per-sequence context passed through to the
 *        context generator
 * @return the best-scoring outcome for each input token, in token order
 */
public String[] tag(String[] sentence, Object[] additionaContext) {
  // Side effect: bestSequence is retained so probability accessors can read it.
  bestSequence = model.bestSequence(sentence, additionaContext, contextGen, sequenceValidator);
  List<String> t = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom: simpler and at least as
  // fast as pre-sizing on modern JVMs.
  return t.toArray(new String[0]);
}
/**
 * Retrieves all part-of-speech tags the underlying model can emit.
 *
 * @return every possible pos tag known to the tagger
 */
public String[] getAllPosTags() {
  String[] outcomes = model.getOutcomes();
  return outcomes;
}
/**
 * Returns the top scoring lemma-class sequences for the given tokens and
 * pos tags, keeping only sequences at or above the given minimum score.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @param minSequenceScore the minimum score a sequence must reach
 * @return the best lemma-class sequences, best first
 */
public Sequence[] topKLemmaClasses(String[] sentence, String[] tags, double minSequenceScore) {
  Object[] extraContext = new Object[] { tags };
  return model.bestSequences(DEFAULT_BEAM_SIZE, sentence, extraContext,
      minSequenceScore, contextGenerator, sequenceValidator);
}
}
/**
 * Returns the top scoring lemma-class sequences for the given tokens and
 * pos tags.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @return the best lemma-class sequences, best first
 */
public Sequence[] topKLemmaClasses(String[] sentence, String[] tags) {
  Object[] extraContext = new Object[] { tags };
  return model.bestSequences(DEFAULT_BEAM_SIZE, sentence, extraContext,
      contextGenerator, sequenceValidator);
}
/**
 * Predict Short Edit Script (automatically induced lemma class).
 *
 * @param toks the array of tokens
 * @param tags the array of pos tags
 * @return an array containing the lemma classes, one per token
 */
public String[] predictSES(String[] toks, String[] tags) {
  // Side effect: bestSequence is retained for later probability queries.
  bestSequence = model.bestSequence(toks, new Object[] {tags}, contextGenerator, sequenceValidator);
  List<String> ses = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom over pre-sizing with size().
  return ses.toArray(new String[0]);
}
/**
 * Creates a name finder model from an already trained sequence model plus
 * its feature-generator descriptor, resources, and codec.
 *
 * @param languageCode the language of the model
 * @param nameFinderModel the trained sequence classification model
 * @param generatorDescriptor serialized feature generator descriptor
 * @param resources additional resources the feature generators need
 * @param manifestInfoEntries entries copied into the model manifest
 * @param seqCodec codec used to map outcomes to spans
 * @param factory the name finder factory
 * @throws IllegalArgumentException if the codec cannot handle the model's outcomes
 */
public TokenNameFinderModel(String languageCode,
    SequenceClassificationModel<String> nameFinderModel,
    byte[] generatorDescriptor, Map<String, Object> resources,
    Map<String, String> manifestInfoEntries, SequenceCodec<String> seqCodec,
    TokenNameFinderFactory factory) {
  super(COMPONENT_NAME, languageCode, manifestInfoEntries, factory);
  init(nameFinderModel, generatorDescriptor, resources, manifestInfoEntries, seqCodec);
  // NOTE(review): compatibility is validated only after init(...) has run;
  // presumably init has no external side effects, so failing afterwards is
  // harmless — confirm before reordering.
  if (!seqCodec.areOutcomesCompatible(nameFinderModel.getOutcomes())) {
    throw new IllegalArgumentException("Model not compatible with name finder!");
  }
}
/**
 * Returns the top scoring outcome sequences for the given tokens and
 * pos tags.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @return the best outcome sequences, best first
 */
public Sequence[] topKSequences(String[] sentence, String[] tags) {
  Object[] extraContext = new Object[] { tags };
  return model.bestSequences(DEFAULT_BEAM_SIZE, sentence, extraContext,
      contextGenerator, sequenceValidator);
}
/**
 * Chunks the given tokens, using their pos tags, and returns one chunk
 * label per token.
 *
 * @param toks the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @return the chunk label assigned to each token, in token order
 */
public String[] chunk(String[] toks, String[] tags) {
  // Pair each token with its tag before handing them to the sequence model.
  TokenTag[] tuples = TokenTag.create(toks, tags);
  // Side effect: bestSequence is retained for later probability queries.
  bestSequence = model.bestSequence(tuples, new Object[] {}, contextGenerator, sequenceValidator);
  List<String> c = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom over pre-sizing with size().
  return c.toArray(new String[0]);
}
/**
 * Checks whether the name finder's sequence model can emit the
 * {@code NameFinderME.OTHER} outcome.
 *
 * @param nameFinderModel the model whose outcomes are inspected
 * @return {@code true} if any outcome equals {@code NameFinderME.OTHER}
 */
private boolean hasOtherAsOutcome(TokenNameFinderModel nameFinderModel) {
  SequenceClassificationModel<String> seqModel = nameFinderModel.getNameFinderSequenceModel();
  for (String candidate : seqModel.getOutcomes()) {
    // Constant-first equals avoids any NPE on a null outcome entry.
    if (NameFinderME.OTHER.equals(candidate)) {
      return true;
    }
  }
  return false;
}
/**
 * Returns the top scoring outcome sequences for the given tokens and pos
 * tags, keeping only sequences at or above the given minimum score.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @param minSequenceScore the minimum score a sequence must reach
 * @return the best outcome sequences, best first
 */
public Sequence[] topKSequences(String[] sentence, String[] tags, double minSequenceScore) {
  Object[] extraContext = new Object[] { tags };
  return model.bestSequences(DEFAULT_BEAM_SIZE, sentence, extraContext,
      minSequenceScore, contextGenerator, sequenceValidator);
}
/**
 * Generates name tags for the given sequence, typically a sentence,
 * returning token spans for any identified names.
 *
 * @param tokens an array of the tokens or words of the sequence,
 *        typically a sentence
 * @param additionalContext features which are based on context outside of
 *        the sentence but which should also be used
 * @return an array of spans for each of the names identified
 */
public Span[] find(String[] tokens, String[][] additionalContext) {
  additionalContextFeatureGenerator.setCurrentContext(additionalContext);
  // Side effect: bestSequence is retained so probs() can be queried later.
  bestSequence = model.bestSequence(tokens, additionalContext, contextGenerator, sequenceValidator);
  List<String> c = bestSequence.getOutcomes();
  // Feed the decoded outcomes back so adaptive feature generators can learn
  // from this document; toArray(new String[0]) is the preferred idiom.
  contextGenerator.updateAdaptiveData(tokens, c.toArray(new String[0]));
  Span[] spans = seqCodec.decode(c);
  spans = setProbs(spans);
  return spans;
}
/**
 * Retrieves every part-of-speech tag the tagger's model can produce.
 *
 * @return all possible pos tags
 */
public String[] getAllPosTags() {
  String[] allTags = model.getOutcomes();
  return allTags;
}
/**
 * Returns the top scoring outcome sequences for the given tokens, using
 * caller-supplied additional context.
 *
 * @param sentence the tokens of the sequence
 * @param additionaContext extra per-sequence context passed to the context
 *        generator
 * @return the best outcome sequences, best first
 */
public Sequence[] topKSequences(String[] sentence, Object[] additionaContext) {
  return model.bestSequences(size, sentence, additionaContext,
      contextGen, sequenceValidator);
}
/**
 * Predict Short Edit Script (automatically induced lemma class).
 *
 * @param toks the array of tokens
 * @param tags the array of pos tags
 * @return an array containing the lemma classes, one per token
 */
public String[] predictSES(String[] toks, String[] tags) {
  // Side effect: bestSequence is retained for later probability queries.
  bestSequence = model.bestSequence(toks, new Object[] {tags}, contextGenerator, sequenceValidator);
  List<String> ses = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom over pre-sizing with size().
  return ses.toArray(new String[0]);
}
/**
 * Exposes the complete set of pos tags supported by the underlying model.
 *
 * @return all possible pos tags
 */
public String[] getAllPosTags() {
  String[] supported = model.getOutcomes();
  return supported;
}
/**
 * Returns the top scoring outcome sequences for the given tokens and pos
 * tags, which are first paired into token/tag tuples.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @return the best outcome sequences, best first
 */
public Sequence[] topKSequences(String[] sentence, String[] tags) {
  TokenTag[] tokenTags = TokenTag.create(sentence, tags);
  Object[] noExtraContext = new Object[] { };
  return model.bestSequences(DEFAULT_BEAM_SIZE, tokenTags, noExtraContext,
      contextGenerator, sequenceValidator);
}
/**
 * Tags the given sequence of tokens, using additional context supplied by
 * the caller, and returns the best outcome for each token.
 *
 * @param sentence the tokens of the sequence to tag
 * @param additionaContext extra per-sequence context passed through to the
 *        context generator
 * @return the best-scoring outcome for each input token, in token order
 */
public String[] tag(String[] sentence, Object[] additionaContext) {
  // Side effect: bestSequence is retained so probability accessors can read it.
  bestSequence = model.bestSequence(sentence, additionaContext, contextGen, sequenceValidator);
  List<String> t = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom over pre-sizing with size().
  return t.toArray(new String[0]);
}
/**
 * Retrieves all automatically induced lemma classes the lemmatizer's model
 * can produce.
 *
 * @return every possible lemma class
 */
public String[] getAllLemmaClasses() {
  String[] lemmaClasses = model.getOutcomes();
  return lemmaClasses;
}
/**
 * Returns the top scoring outcome sequences for the given tokens and pos
 * tags (paired into tuples), keeping only sequences at or above the given
 * minimum score.
 *
 * @param sentence the tokens of the sentence
 * @param tags the pos tags for the tokens
 * @param minSequenceScore the minimum score a sequence must reach
 * @return the best outcome sequences, best first
 */
public Sequence[] topKSequences(String[] sentence, String[] tags, double minSequenceScore) {
  TokenTag[] tokenTags = TokenTag.create(sentence, tags);
  Object[] noExtraContext = new Object[] { };
  return model.bestSequences(DEFAULT_BEAM_SIZE, tokenTags, noExtraContext,
      minSequenceScore, contextGenerator, sequenceValidator);
}
/**
 * Predict Short Edit Script (automatically induced lemma class).
 *
 * @param toks the array of tokens
 * @param tags the array of pos tags
 * @return an array containing the lemma classes, one per token
 */
public String[] predictSES(String[] toks, String[] tags) {
  // Side effect: bestSequence is retained for later probability queries.
  bestSequence = model.bestSequence(toks, new Object[] {tags}, contextGenerator, sequenceValidator);
  List<String> ses = bestSequence.getOutcomes();
  // toArray(new String[0]) is the preferred idiom over pre-sizing with size().
  return ses.toArray(new String[0]);
}