/**
 * A utility method that tokenizes a range of text.
 */
protected void annotateRange(JCas jcas, int beginPos, int endPos)
        throws AnalysisEngineProcessException {
    String text = jcas.getDocumentText().substring(beginPos, endPos);
    List<Token> tokens;
    try {
        tokens = tokenizer.tokenizeAndSort(text);
    } catch (Exception e) {
        throw new AnalysisEngineProcessException(e);
    }
    for (Token token : tokens) {
        // Convert the tokenizer Token into a JCas annotation, offsetting
        // its span by beginPos so it maps into the full document text.
        BaseToken bta = TokenConverter.convert(token, jcas, beginPos);
        bta.setTokenNumber(tokenCount);
        // Add the JCas object to the CAS index.
        bta.addToIndexes();
        tokenCount++;
    }
}
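/*
 * For context, a minimal sketch of how annotateRange might be driven from a
 * UIMA annotator's process method. The Sentence annotation type, the
 * JCasUtil.select call (org.apache.uima.fit.util.JCasUtil), and the
 * per-document reset of tokenCount are assumptions, not part of the
 * fragment above.
 */
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
    tokenCount = 0; // assumed: token numbering restarts for each document
    for (Sentence sentence : JCasUtil.select(jcas, Sentence.class)) {
        // Tokenize each sentence span independently; annotateRange offsets
        // the resulting token spans by the sentence's begin position.
        annotateRange(jcas, sentence.getBegin(), sentence.getEnd());
    }
}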
baseToken.addToIndexes();
// Input line carries only the token text (tokens[1]):
BaseToken btoken = new BaseToken(jCas, wordStart, wordEnd);
btoken.setTokenNumber(wordNumber++);
btoken.addToIndexes();
documentText.append(tokens[1] + " ");
wordStart = wordEnd + 1;

// Input line also carries a part of speech in tokens[2]:
BaseToken btoken = new BaseToken(jCas, wordStart, wordEnd);
btoken.setTokenNumber(wordNumber++);
btoken.setPartOfSpeech(tokens[2]);
btoken.addToIndexes();
documentText.append(tokens[1] + " ");
wordStart = wordEnd + 1;

// Input line also carries a normalized form in tokens[2]:
BaseToken btoken = new BaseToken(jCas, wordStart, wordEnd);
btoken.setTokenNumber(wordNumber++);
btoken.setNormalizedForm(tokens[2]);
btoken.addToIndexes();
documentText.append(tokens[1] + " ");
wordStart = wordEnd + 1;

// Input line carries both a normalized form (tokens[2]) and a part of
// speech (tokens[3]):
BaseToken btoken = new BaseToken(jCas, wordStart, wordEnd);
btoken.setTokenNumber(wordNumber++);
btoken.setNormalizedForm(tokens[2]);
btoken.setPartOfSpeech(tokens[3]);
btoken.addToIndexes();
documentText.append(tokens[1] + " ");
wordStart = wordEnd + 1;
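/*
 * A minimal sketch of the read loop the variants above appear to belong to,
 * assuming one pre-tokenized token per input line, split into whitespace-
 * delimited columns with the token text in tokens[1]. The reader variable,
 * the column layout, and the final setDocumentText call are assumptions.
 */
StringBuilder documentText = new StringBuilder();
int wordNumber = 0;
int wordStart = 0;
String line;
while ((line = reader.readLine()) != null) { // reader: a BufferedReader (assumed)
    String[] tokens = line.split("\\s+");
    int wordEnd = wordStart + tokens[1].length();
    BaseToken btoken = new BaseToken(jCas, wordStart, wordEnd);
    btoken.setTokenNumber(wordNumber++);
    btoken.addToIndexes();
    documentText.append(tokens[1] + " "); // rebuild the document text token by token
    wordStart = wordEnd + 1;              // +1 for the appended space
}
jCas.setDocumentText(documentText.toString());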
// Guard the downcast before adding the token to the CAS index (the opening
// condition is reconstructed from the else branch's error message):
if (bta instanceof BaseToken) {
    BaseToken.class.cast(bta).addToIndexes();
} else {
    throw new AnalysisEngineProcessException(
            "Token returned cannot be cast as BaseToken", new Object[] { bta });
}
newGoldToken.setTokenNumber(oldSystemToken.getTokenNumber());
newGoldToken.addToIndexes();
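/*
 * A minimal sketch of how the fragment above might be used: copying each
 * token from a "system" view into a "gold" view while preserving its span
 * and token number. The view names, the surrounding loop, and the method
 * wrapper are assumptions, not the original code.
 */
static void copyTokensToGoldView(JCas jcas) throws CASException {
    JCas systemView = jcas.getView("SystemView"); // view names are assumptions
    JCas goldView = jcas.getView("GoldView");
    for (BaseToken oldSystemToken : JCasUtil.select(systemView, BaseToken.class)) {
        // Recreate the token in the gold view with the same character span.
        BaseToken newGoldToken =
                new BaseToken(goldView, oldSystemToken.getBegin(), oldSystemToken.getEnd());
        newGoldToken.setTokenNumber(oldSystemToken.getTokenNumber());
        newGoldToken.addToIndexes();
    }
}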