ContainingConstraint containingConstraint = new ContainingConstraint(containerAnnotation);
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
    allRemoveAnnotations.iterator(), containingConstraint);
protected void process(CAS cas, AnnotationFS sentenceAnnotation) {
  FSIndex<AnnotationFS> allTokens = cas.getAnnotationIndex(mTokenType);
  ContainingConstraint containingConstraint = new ContainingConstraint(sentenceAnnotation);
  String sentence = sentenceAnnotation.getCoveredText();

  Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
      allTokens.iterator(), containingConstraint);

  List<Span> tokenSpans = new LinkedList<>();
  while (containingTokens.hasNext()) {
    AnnotationFS token = containingTokens.next();
    tokenSpans.add(new Span(token.getBegin() - sentenceAnnotation.getBegin(),
        token.getEnd() - sentenceAnnotation.getBegin()));
  }

  ParseConverter converter = new ParseConverter(sentence,
      tokenSpans.toArray(new Span[tokenSpans.size()]));
  Parse unparsedTree = converter.getParseForTagger();

  if (unparsedTree.getChildCount() > 0) {
    Parse parse = mParser.parse(unparsedTree);

    // TODO: We need a strategy to handle the case that a full
    // parse could not be found. What to do in this case?

    parse = converter.transformParseFromTagger(parse);

    if (mLogger.isLoggable(Level.INFO)) {
      StringBuffer parseString = new StringBuffer();
      parse.show(parseString);
      mLogger.log(Level.INFO, parseString.toString());
    }

    createAnnotation(cas, sentenceAnnotation.getBegin(), parse);
  }
}
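Several of these snippets pass a ContainingConstraint to createFilteredIterator without ever showing its implementation. A minimal sketch of such a constraint, assuming it simply accepts annotations whose offsets lie inside the container annotation (the class and field names here are illustrative, not taken from the source):

import org.apache.uima.cas.FSMatchConstraint;
import org.apache.uima.cas.FeatureStructure;
import org.apache.uima.cas.text.AnnotationFS;

class ContainingConstraint implements FSMatchConstraint {

  private final AnnotationFS container;

  ContainingConstraint(AnnotationFS container) {
    this.container = container;
  }

  @Override
  public boolean match(FeatureStructure fs) {
    if (!(fs instanceof AnnotationFS)) {
      return false;
    }
    AnnotationFS annotation = (AnnotationFS) fs;
    // keep only annotations that lie completely inside the container's span
    return annotation.getBegin() >= container.getBegin()
        && annotation.getEnd() <= container.getEnd();
  }
}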
public static <F extends FeatureStructure> FSIterator<F> filter(CAS cas, FSIterator<F> srcIter,
    FSMatchConstraint... constraints) {
  if (constraints.length == 0) {
    return srcIter;
  }
  FSMatchConstraint resultConstr = and(constraints);
  return cas.createFilteredIterator(srcIter, resultConstr);
}
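The filter(...) helper above delegates to an and(...) method that is not shown here. A minimal sketch, assuming it folds the varargs constraints pairwise; only ConstraintFactory.instance() and cf.and(...) are standard UIMA API, the helper itself is an assumption:

public static FSMatchConstraint and(FSMatchConstraint... constraints) {
  ConstraintFactory cf = ConstraintFactory.instance();
  // combine all constraints into one conjunctive constraint
  FSMatchConstraint result = constraints[0];
  for (int i = 1; i < constraints.length; i++) {
    result = cf.and(result, constraints[i]);
  }
  return result;
}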
/**
 * {@inheritDoc}
 */
@Override
public FSIterator<AnnotationFS> extract(CAS cas) {
  // TODO optimization point - get common ancestor type if any
  FSIterator<AnnotationFS> allAnnoIter = cas.getAnnotationIndex().iterator();
  return cas.createFilteredIterator(allAnnoIter, annoMatchConstraint);
}
private void process(CAS tcas, AnnotationFS sentence) {
  FSIndex<AnnotationFS> allTokens = tcas.getAnnotationIndex(mTokenType);
  ContainingConstraint containingConstraint = new ContainingConstraint(sentence);

  List<String> tokens = new ArrayList<String>();
  List<String> tags = new ArrayList<String>();

  Iterator<AnnotationFS> containingTokens = tcas.createFilteredIterator(
      allTokens.iterator(), containingConstraint);

  while (containingTokens.hasNext()) {
    AnnotationFS tokenAnnotation = containingTokens.next();
    String tag = tokenAnnotation.getFeatureValueAsString(mPOSFeature);

    tokens.add(tokenAnnotation.getCoveredText().trim());
    tags.add(tag);
  }

  mPOSSamples.add(new POSSample(tokens, tags));
}
private void processChunk(CAS tcas, AnnotationFS chunk) {
  String chunkTag = chunk.getFeatureValueAsString(mChunkTagFeature);

  FSIndex<AnnotationFS> tokenIndex = tcas.getAnnotationIndex(mTokenType);
  ContainingConstraint containingConstraint = new ContainingConstraint(chunk);
  Iterator<AnnotationFS> tokenIterator = tcas.createFilteredIterator(tokenIndex.iterator(),
      containingConstraint);

  List<String> tokens = new ArrayList<String>();
  List<String> tags = new ArrayList<String>();
  List<String> chunkTags = new ArrayList<String>();

  while (tokenIterator.hasNext()) {
    AnnotationFS tokenAnnotation = tokenIterator.next();
    tokens.add(tokenAnnotation.getCoveredText().trim());
    tags.add(tokenAnnotation.getFeatureValueAsString(mPOSFeature));
    chunkTags.add(chunkTag);
  }

  mChunkSamples.add(new ChunkSample(tokens, tags, chunkTags));
}
private void processSentence(CAS tcas, AnnotationFS sentence) {
  FSIndex<AnnotationFS> chunkIndex = tcas.getAnnotationIndex(mChunkType);
  ContainingConstraint containingConstraint = new ContainingConstraint(sentence);

  Iterator<AnnotationFS> chunkIterator = tcas.createFilteredIterator(
      chunkIndex.iterator(), containingConstraint);

  while (chunkIterator.hasNext()) {
    AnnotationFS chunkAnnotation = chunkIterator.next();
    processChunk(tcas, chunkAnnotation);
  }
}
public FSIterator<AnnotationFS> getFilteredBasicIterator(FSMatchConstraint constraint) {
  ConstraintFactory cf = cas.getConstraintFactory();
  FSMatchConstraint matchConstraint = cf.and(constraint, filter.getDefaultConstraint());
  return cas.createFilteredIterator(basicIt, matchConstraint);
}
public FSIterator<AnnotationFS> createFilteredIterator(CAS cas, Type basicType) {
  if (windowAnnotation != null) {
    FSIterator<AnnotationFS> windowIt = cas.getAnnotationIndex(basicType)
        .subiterator(windowAnnotation);
    FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(windowIt,
        createCurrentConstraint(false));
    return iterator;
  } else {
    FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
        cas.getAnnotationIndex(basicType).iterator(), createCurrentConstraint(false));
    return iterator;
  }
}
public List<RutaBasic> getBasicsInWindow(AnnotationFS windowAnnotation) {
  List<RutaBasic> result = new ArrayList<RutaBasic>();
  if (windowAnnotation instanceof RutaBasic) {
    result.add((RutaBasic) windowAnnotation);
    return result;
  }
  FSMatchConstraint defaultConstraint = filter.getDefaultConstraint();
  FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
      cas.getAnnotationIndex(basicType).subiterator(windowAnnotation), defaultConstraint);
  while (iterator.isValid()) {
    result.add((RutaBasic) iterator.get());
    iterator.moveToNext();
  }
  return result;
}
private void process(CAS tcas, AnnotationFS sentence) {
  FSIndex<AnnotationFS> allTokens = tcas.getAnnotationIndex(mTokenType);
  ContainingConstraint containingConstraint = new ContainingConstraint(sentence);

  Iterator<AnnotationFS> containingTokens = tcas.createFilteredIterator(
      allTokens.iterator(), containingConstraint);

  List<Span> openNLPSpans = new LinkedList<Span>();
  while (containingTokens.hasNext()) {
    AnnotationFS tokenAnnotation = containingTokens.next();
    openNLPSpans.add(new Span(tokenAnnotation.getBegin() - sentence.getBegin(),
        tokenAnnotation.getEnd() - sentence.getBegin()));
  }

  Span[] spans = openNLPSpans.toArray(new Span[openNLPSpans.size()]);
  Arrays.sort(spans);

  tokenSamples.add(new TokenSample(sentence.getCoveredText(), spans));
}
public List<TextMarkerBasic> getBasicsInWindow(AnnotationFS windowAnnotation) {
  List<TextMarkerBasic> result = new ArrayList<TextMarkerBasic>();
  if (windowAnnotation instanceof TextMarkerBasic) {
    result.add((TextMarkerBasic) windowAnnotation);
    return result;
  }
  FSMatchConstraint defaultConstraint = filter.getDefaultConstraint();
  FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
      cas.getAnnotationIndex(basicType).subiterator(windowAnnotation), defaultConstraint);
  while (iterator.isValid()) {
    result.add((TextMarkerBasic) iterator.get());
    iterator.moveToNext();
  }
  return result;
}
@Override
public Object[] getElements(Object inputElement) {
  if (mCurrentType == null) {
    return new Object[] {};
  }

  StrictTypeConstraint typeConstraint = new StrictTypeConstraint(mCurrentType);

  FSIterator<FeatureStructure> strictTypeIterator = mDocument.getCAS().createFilteredIterator(
      mDocument.getCAS().getIndexRepository().getAllIndexedFS(mCurrentType), typeConstraint);

  LinkedList<ModelFeatureStructure> featureStructureList = new LinkedList<ModelFeatureStructure>();
  while (strictTypeIterator.hasNext()) {
    featureStructureList.add(new ModelFeatureStructure(mDocument, strictTypeIterator.next()));
  }

  ModelFeatureStructure[] featureStructureArray = new ModelFeatureStructure[featureStructureList.size()];
  featureStructureList.toArray(featureStructureArray);

  return featureStructureArray;
}
/**
 * Retrieves annotations of the given type from the {@link CAS}.
 *
 * @param type the type
 * @return the annotations
 */
@Override
public Collection<AnnotationFS> getAnnotations(Type type) {
  FSIndex<AnnotationFS> annotationIndex = mCAS.getAnnotationIndex(type);

  StrictTypeConstraint typeConstraint = new StrictTypeConstraint(type);

  FSIterator<AnnotationFS> strictTypeIterator = mCAS.createFilteredIterator(
      annotationIndex.iterator(), typeConstraint);

  return fsIteratorToCollection(strictTypeIterator);
}
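The two Cas Editor snippets above filter with a StrictTypeConstraint, which presumably matches only feature structures whose type is exactly the requested type, excluding subtypes. A minimal sketch under that assumption; the actual class may differ:

import org.apache.uima.cas.FSMatchConstraint;
import org.apache.uima.cas.FeatureStructure;
import org.apache.uima.cas.Type;

class StrictTypeConstraint implements FSMatchConstraint {

  private final Type targetType;

  StrictTypeConstraint(Type targetType) {
    this.targetType = targetType;
  }

  @Override
  public boolean match(FeatureStructure fs) {
    // match only feature structures whose type is exactly the target type,
    // not one of its subtypes
    return fs.getType().equals(targetType);
  }
}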
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
    tokenAnnotations.iterator(), sentenceContainingConstraint);
Iterator<AnnotationFS> containingNames = cas.createFilteredIterator(
    allNames.iterator(), sentenceContainingConstraint);
cas.createFilteredIterator(allAnnotations.iterator(), annotationInSpanAndStrictTypeConstraint);
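The last fragment filters with an annotationInSpanAndStrictTypeConstraint whose construction is not shown. A sketch of how a combined span-and-type constraint can be assembled with the standard UIMA ConstraintFactory; the method name, spanBegin, spanEnd, and the assumption that type is an annotation type are illustrative, and the FSTypeConstraint part also matches subtypes, so a strict constraint like the StrictTypeConstraint sketch above could be substituted for exact-type filtering:

FSIterator<AnnotationFS> annotationsInSpan(CAS cas, Type type, int spanBegin, int spanEnd) {
  ConstraintFactory cf = cas.getConstraintFactory();

  // constrain the "begin" feature to be >= spanBegin
  FSIntConstraint beginConstraint = cf.createIntConstraint();
  beginConstraint.geq(spanBegin);
  FeaturePath beginPath = cas.createFeaturePath();
  beginPath.addFeature(type.getFeatureByBaseName("begin"));
  FSMatchConstraint beginMatch = cf.embedConstraint(beginPath, beginConstraint);

  // constrain the "end" feature to be <= spanEnd
  FSIntConstraint endConstraint = cf.createIntConstraint();
  endConstraint.leq(spanEnd);
  FeaturePath endPath = cas.createFeaturePath();
  endPath.addFeature(type.getFeatureByBaseName("end"));
  FSMatchConstraint endMatch = cf.embedConstraint(endPath, endConstraint);

  // restrict to the requested annotation type
  FSTypeConstraint typeConstraint = cf.createTypeConstraint();
  typeConstraint.add(type);

  FSMatchConstraint combined = cf.and(cf.and(beginMatch, endMatch), typeConstraint);

  return cas.createFilteredIterator(cas.getAnnotationIndex().iterator(), combined);
}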