// Bounds on complete-parse probabilities, tracked in the ln domain:
// minComplete starts above any possible log-prob (log-probs are <= 0),
// bestComplete starts far below any achievable one.
double minComplete = 2;
double bestComplete = -100000; //approximating -infinity/0 in ln domain
// Keep advancing derivations while candidates remain and either fewer than M
// complete parses exist or the best pending derivation could still beat the
// worst complete one; bounded by the maximum derivation length.
while (odh.size() > 0 && (completeParses.size() < M || (odh.first()).getProb() < minComplete)
    && derivationStage < maxDerivationLength) {
  ndh = new TreeSet<>();
  // Debug trace of the derivation currently being advanced.
  System.out.print(derivationStage + " " + derivationRank + " " + tp.getProb());
  tp.show();
  System.out.println();
  // NOTE(review): fragment — intervening lines appear elided here; ndh is
  // read via last() immediately after being created empty, so population
  // code must exist between these statements in the full file.
  nd = advanceChunks(tp,(ndh.last()).getProb());
  if (nd[k].complete()) {
    // A complete parse: attach the top node, then tighten the prob bounds.
    advanceTop(nd[k]);
    if (nd[k].getProb() > bestComplete) {
      bestComplete = nd[k].getProb();
      if (nd[k].getProb() < minComplete) {
        minComplete = nd[k].getProb();
/**
 * Converts the parse from the tagger back.
 *
 * @param parseFromTagger the parse node produced by the tagger
 * @return the final parse
 */
Parse transformParseFromTagger(Parse parseFromTagger) {
  Span taggerSpan = parseFromTagger.getSpan();
  // Map tagger-side offsets back to offsets in the original sentence.
  Span originalSpan = new Span(mIndexMap.get(taggerSpan.getStart()),
      mIndexMap.get(taggerSpan.getEnd()));
  Parse result = new Parse(mSentence, originalSpan,
      parseFromTagger.getType(), parseFromTagger.getProb(),
      parseFromTagger.getHeadIndex());
  // Recursively transform and re-attach every child node.
  for (Parse taggerChild : parseFromTagger.getChildren()) {
    result.insert(transformParseFromTagger(taggerChild));
  }
  return result;
}
}
// Debug trace: parse index and its probability, space-separated, no newline.
System.out.print(pi + " " + parses[pi].getProb() + " ");
// Get the top-K chunk-tag sequences for the words/POS tags.
// NOTE(review): third arg looks like a minimum-score cutoff in the log
// domain (minChunkScore minus the parse's current prob) — confirm against
// the Chunker.topKSequences contract.
Sequence[] cs = chunker.topKSequences(words, ptags,minChunkScore - p.getProb());
// One candidate parse per returned chunk sequence.
Parse[] newParses = new Parse[cs.length];
for (int si = 0, sl = cs.length; si < sl; si++) {
protected AnnotationFS createAnnotation(CAS cas, int offset, Parse parse) { Parse[] parseChildren = parse.getChildren(); AnnotationFS[] parseChildAnnotations = new AnnotationFS[parseChildren.length]; // do this for all children for (int i = 0; i < parseChildren.length; i++) { parseChildAnnotations[i] = createAnnotation(cas, offset, parseChildren[i]); } AnnotationFS parseAnnotation = cas.createAnnotation(mParseType, offset + parse.getSpan().getStart(), offset + parse.getSpan().getEnd()); parseAnnotation.setStringValue(mTypeFeature, parse.getType()); if (probabilityFeature != null) { parseAnnotation.setDoubleValue(probabilityFeature, parse.getProb()); } ArrayFS childrenArray = cas.createArrayFS(parseChildAnnotations.length); childrenArray.copyFromArray(parseChildAnnotations, 0, 0, parseChildAnnotations.length); parseAnnotation.setFeatureValue(childrenFeature, childrenArray); cas.getIndexRepository().addFS(parseAnnotation); return parseAnnotation; }
// Bounds on complete-parse probabilities, tracked in the ln domain:
// minComplete starts above any possible log-prob (log-probs are <= 0),
// bestComplete starts far below any achievable one.
double minComplete = 2;
double bestComplete = -100000; //approximating -infinity/0 in ln domain
// Keep advancing derivations while candidates remain and either fewer than M
// complete parses exist or the best pending derivation could still beat the
// worst complete one; bounded by the maximum derivation length.
while (odh.size() > 0 && (completeParses.size() < M || (odh.first()).getProb() < minComplete)
    && derivationStage < maxDerivationLength) {
  ndh = new TreeSet<>();
  // Debug trace of the derivation currently being advanced.
  System.out.print(derivationStage + " " + derivationRank + " " + tp.getProb());
  tp.show();
  System.out.println();
  // NOTE(review): fragment — intervening lines appear elided here; ndh is
  // read via last() immediately after being created empty, so population
  // code must exist between these statements in the full file.
  nd = advanceChunks(tp,(ndh.last()).getProb());
  if (nd[k].complete()) {
    // A complete parse: attach the top node, then tighten the prob bounds.
    advanceTop(nd[k]);
    if (nd[k].getProb() > bestComplete) {
      bestComplete = nd[k].getProb();
      if (nd[k].getProb() < minComplete) {
        minComplete = nd[k].getProb();
// Bounds on complete-parse probabilities, tracked in the ln domain:
// minComplete starts above any possible log-prob (log-probs are <= 0),
// bestComplete starts far below any achievable one.
double minComplete = 2;
double bestComplete = -100000; //approximating -infinity/0 in ln domain
// Keep advancing derivations while candidates remain and either fewer than M
// complete parses exist or the best pending derivation could still beat the
// worst complete one; bounded by the maximum derivation length.
while (odh.size() > 0 && (completeParses.size() < M || (odh.first()).getProb() < minComplete)
    && derivationStage < maxDerivationLength) {
  ndh = new TreeSet<>();
  // Debug trace of the derivation currently being advanced.
  System.out.print(derivationStage + " " + derivationRank + " " + tp.getProb());
  tp.show();
  System.out.println();
  // NOTE(review): fragment — intervening lines appear elided here; ndh is
  // read via last() immediately after being created empty, so population
  // code must exist between these statements in the full file.
  nd = advanceChunks(tp,(ndh.last()).getProb());
  if (nd[k].complete()) {
    // A complete parse: attach the top node, then tighten the prob bounds.
    advanceTop(nd[k]);
    if (nd[k].getProb() > bestComplete) {
      bestComplete = nd[k].getProb();
      if (nd[k].getProb() < minComplete) {
        minComplete = nd[k].getProb();
// Compute the end offset of the current token and insert it as a leaf
// TOK node carrying the parent parse's probability.
tokenEnd = tokenStart + tok.length();
parse.insert(new Parse(line2parse, new Span(tokenStart, tokenEnd),
    AbstractBottomUpParser.TOK_NODE, parse.getProb(), 0));
// NOTE(review): the +1 assumes tokens are separated by exactly one
// space in line2parse — confirm against the tokenization upstream.
tokenStart = tokenEnd + 1;
/**
 * Converts the parse from the tagger back.
 *
 * @param parseFromTagger the parse node produced by the tagger
 * @return the final parse
 */
Parse transformParseFromTagger(Parse parseFromTagger) {
  Span taggerSpan = parseFromTagger.getSpan();
  // Map tagger-side offsets back to offsets in the original sentence.
  Span originalSpan = new Span(mIndexMap.get(taggerSpan.getStart()),
      mIndexMap.get(taggerSpan.getEnd()));
  Parse result = new Parse(mSentence, originalSpan,
      parseFromTagger.getType(), parseFromTagger.getProb(),
      parseFromTagger.getHeadIndex());
  // Recursively transform and re-attach every child node.
  for (Parse taggerChild : parseFromTagger.getChildren()) {
    result.insert(transformParseFromTagger(taggerChild));
  }
  return result;
}
}
// Debug trace: parse index and its probability, space-separated, no newline.
System.out.print(pi + " " + parses[pi].getProb() + " ");
// Get the top-K chunk-tag sequences for the words/POS tags.
// NOTE(review): third arg looks like a minimum-score cutoff in the log
// domain (minChunkScore minus the parse's current prob) — confirm against
// the Chunker.topKSequences contract.
Sequence[] cs = chunker.topKSequences(words, ptags,minChunkScore - p.getProb());
// One candidate parse per returned chunk sequence.
Parse[] newParses = new Parse[cs.length];
for (int si = 0, sl = cs.length; si < sl; si++) {
// Debug trace: parse index and its probability, space-separated, no newline.
System.out.print(pi + " " + parses[pi].getProb() + " ");
// Get the top-K chunk-tag sequences for the words/POS tags.
// NOTE(review): third arg looks like a minimum-score cutoff in the log
// domain (minChunkScore minus the parse's current prob) — confirm against
// the Chunker.topKSequences contract.
Sequence[] cs = chunker.topKSequences(words, ptags,minChunkScore - p.getProb());
// One candidate parse per returned chunk sequence.
Parse[] newParses = new Parse[cs.length];
for (int si = 0, sl = cs.length; si < sl; si++) {
protected AnnotationFS createAnnotation(CAS cas, int offset, Parse parse) { Parse[] parseChildren = parse.getChildren(); AnnotationFS[] parseChildAnnotations = new AnnotationFS[parseChildren.length]; // do this for all children for (int i = 0; i < parseChildren.length; i++) { parseChildAnnotations[i] = createAnnotation(cas, offset, parseChildren[i]); } AnnotationFS parseAnnotation = cas.createAnnotation(mParseType, offset + parse.getSpan().getStart(), offset + parse.getSpan().getEnd()); parseAnnotation.setStringValue(mTypeFeature, parse.getType()); if (probabilityFeature != null) { parseAnnotation.setDoubleValue(probabilityFeature, parse.getProb()); } ArrayFS childrenArray = cas.createArrayFS(parseChildAnnotations.length); childrenArray.copyFromArray(parseChildAnnotations, 0, 0, parseChildAnnotations.length); parseAnnotation.setFeatureValue(childrenFeature, childrenArray); cas.getIndexRepository().addFS(parseAnnotation); return parseAnnotation; }