/** * Constructor for when only a single child from anywhere in this vector is available. It is * assumed that the <code>previous</code> and <code>next</code> links are filled in by every * child. * * @param c Any child in this vector. * @param s The offset at which this sentence starts. * @param e The offset at which this sentence ends. **/ public LinkedVector(LinkedChild c, int s, int e) { super(s, e); children = new Vector(); while (c.previous != null) c = c.previous; for (; c != null; c = c.next) add(c); }
/**
 * Adds a token to the given sentence according to the globally configured
 * tokenization scheme.
 *
 * <p>Under {@code DualTokenizationScheme} the original word is added and its
 * split forms are recorded in {@code word.parts}; under
 * {@code LbjTokenizationScheme} each split piece is added as its own token.
 * Any other scheme is a fatal configuration error and terminates the JVM.
 *
 * @param sentence The sentence to which the token(s) are appended.
 * @param word     The word to tokenize and add.
 */
public static void addTokenToSentence(LinkedVector sentence, NEWord word) {
    Vector<NEWord> v = NEWord.splitWord(word);
    // Hoist the scheme lookup: it is read up to three times below.
    TokenizationScheme scheme = ParametersForLbjCode.currentParameters.tokenizationScheme;
    if (scheme.equals(TokenizationScheme.DualTokenizationScheme)) {
        // Keep the original token; remember the split forms on the word itself.
        sentence.add(word);
        word.parts = new String[v.size()];
        for (int j = 0; j < v.size(); j++)
            word.parts[j] = v.elementAt(j).form;
    } else if (scheme.equals(TokenizationScheme.LbjTokenizationScheme)) {
        // Add each split piece as an independent token.
        for (int j = 0; j < v.size(); j++)
            sentence.add(v.elementAt(j));
    } else {
        System.err
                .println("Fatal error in BracketFileManager.readAndAnnotate - unrecognized tokenization scheme: "
                        + scheme);
        // Was exit(0): a fatal error must not report success to the OS.
        System.exit(1);
    }
}
/**
 * Adds a token to the given sentence according to the tokenization scheme
 * carried on the word's own parameters.
 *
 * <p>Under {@code DualTokenizationScheme} the original word is added and its
 * split forms are recorded in {@code word.parts}; under
 * {@code LbjTokenizationScheme} each split piece is added as its own token.
 * Any other scheme is a fatal configuration error and terminates the JVM.
 *
 * @param sentence The sentence to which the token(s) are appended.
 * @param word     The word to tokenize and add; supplies the scheme via
 *                 {@code word.params}.
 */
public static void addTokenToSentence(LinkedVector sentence, NEWord word) {
    Vector<NEWord> v = NEWord.splitWord(word);
    // Hoist the scheme lookup: it is read up to three times below.
    TokenizationScheme scheme = word.params.tokenizationScheme;
    if (scheme.equals(TokenizationScheme.DualTokenizationScheme)) {
        // Keep the original token; remember the split forms on the word itself.
        sentence.add(word);
        word.parts = new String[v.size()];
        for (int j = 0; j < v.size(); j++)
            word.parts[j] = v.elementAt(j).form;
    } else if (scheme.equals(TokenizationScheme.LbjTokenizationScheme)) {
        // Add each split piece as an independent token.
        for (int j = 0; j < v.size(); j++)
            sentence.add(v.elementAt(j));
    } else {
        System.err
                .println("Fatal error in BracketFileManager.readAndAnnotate - unrecognized tokenization scheme: "
                        + scheme);
        // Was exit(0): a fatal error must not report success to the OS.
        System.exit(1);
    }
}
/**
 * Adds a token to the given sentence according to the tokenization scheme
 * carried on the word's own parameters.
 *
 * <p>Under {@code DualTokenizationScheme} the original word is added and its
 * split forms are recorded in {@code word.parts}; under
 * {@code LbjTokenizationScheme} each split piece is added as its own token.
 * Any other scheme is a fatal configuration error and terminates the JVM.
 *
 * @param sentence The sentence to which the token(s) are appended.
 * @param word     The word to tokenize and add; supplies the scheme via
 *                 {@code word.params}.
 */
public static void addTokenToSentence(LinkedVector sentence, NEWord word) {
    Vector<NEWord> v = NEWord.splitWord(word);
    // Hoist the scheme lookup: it is read up to three times below.
    TokenizationScheme scheme = word.params.tokenizationScheme;
    if (scheme.equals(TokenizationScheme.DualTokenizationScheme)) {
        // Keep the original token; remember the split forms on the word itself.
        sentence.add(word);
        word.parts = new String[v.size()];
        for (int j = 0; j < v.size(); j++)
            word.parts[j] = v.elementAt(j).form;
    } else if (scheme.equals(TokenizationScheme.LbjTokenizationScheme)) {
        // Add each split piece as an independent token.
        for (int j = 0; j < v.size(); j++)
            sentence.add(v.elementAt(j));
    } else {
        System.err
                .println("Fatal error in BracketFileManager.readAndAnnotate - unrecognized tokenization scheme: "
                        + scheme);
        // Was exit(0): a fatal error must not report success to the OS.
        System.exit(1);
    }
}
.substring(2))) w.neTypeLevel2 = w.neTypeLevel2.substring(0, 2) + "ENTITY"; sentence.add(w);
.substring(2))) w.neTypeLevel2 = w.neTypeLevel2.substring(0, 2) + "ENTITY"; sentence.add(w);
.substring(2))) w.neTypeLevel2 = w.neTypeLevel2.substring(0, 2) + "ENTITY"; sentence.add(w);
lbjTokenVectorList.add( lbjSentenceTokens ); lbjSentenceTokens = new LinkedVector(); wcurrent.partOfSpeech = tokSpan.getLabel(); Token tcurrent = new Token(wcurrent, tprevious, ""); lbjSentenceTokens.add(tcurrent); if (tprevious != null) { tprevious.next = tcurrent; lbjTokenVectorList.add( lbjSentenceTokens );