tokens.getTokens();
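For context, a minimal sketch of how this call is typically reached in ANTLR 4. The stream buffers lazily, so fill() is called before asking for the whole list. The grammar name "Hello" and the input string are placeholders:

import org.antlr.v4.runtime.*;

// Minimal sketch, assuming an ANTLR 4 lexer generated from a
// hypothetical grammar named "Hello".
CharStream input = CharStreams.fromString("hello world");
HelloLexer lexer = new HelloLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
tokens.fill();                        // buffer every token up to EOF
for (Token t : tokens.getTokens()) {  // the full buffered token list
    System.out.println(t);
}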
public List getTokens(int start, int stop) {
    return super.getTokens(start, stop);
} // end getTokens()
CommonTokenStream tokens = ...
tokens.fill();
StringBuilder sb = new StringBuilder();
for (Token token : tokens.getTokens()) {
    sb.append(((YourCustomTokenType) token).toString());
}
System.out.print(sb.toString());
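The cast to YourCustomTokenType above only succeeds if the lexer was configured to emit such tokens. A sketch of the ANTLR 4 wiring, assuming the custom type extends CommonToken; every name here is hypothetical, and the lexer must be given the factory via lexer.setTokenFactory(new YourTokenFactory()):

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.misc.Pair;

// Hypothetical custom token class matching the cast above.
class YourCustomTokenType extends CommonToken {
    YourCustomTokenType(Pair<TokenSource, CharStream> source, int type,
                        int channel, int start, int stop) {
        super(source, type, channel, start, stop);
    }

    @Override
    public String toString() {
        return getText(); // custom rendering goes here
    }
}

// Factory that makes the stream actually contain YourCustomTokenType instances.
class YourTokenFactory implements TokenFactory<YourCustomTokenType> {
    @Override
    public YourCustomTokenType create(Pair<TokenSource, CharStream> source, int type,
            String text, int channel, int start, int stop, int line, int charPositionInLine) {
        YourCustomTokenType t = new YourCustomTokenType(source, type, channel, start, stop);
        t.setLine(line);
        t.setCharPositionInLine(charPositionInLine);
        if (text != null) {
            t.setText(text);
        }
        return t;
    }

    @Override
    public YourCustomTokenType create(int type, String text) {
        YourCustomTokenType t = new YourCustomTokenType(
                new Pair<TokenSource, CharStream>(null, null),
                type, Token.DEFAULT_CHANNEL, -1, -1);
        t.setText(text);
        return t;
    }
}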
protected List<CommonToken> extractComments(CommonTokenStream stream) {
    List<CommonToken> comments = new ArrayList<CommonToken>();
    for (Object t : stream.getTokens()) {
        CommonToken token = (CommonToken) t;
        if (token.getType() == EolLexer.COMMENT || token.getType() == EolParser.LINE_COMMENT) {
            comments.add(token);
        }
    }
    return comments;
}
CSVLexer lexer = new CSVLexer(new ANTLRInputStream("a,b,\"c\"\"c\""));
CommonTokenStream tokens = new CommonTokenStream(lexer);
tokens.fill();
for (Token token : tokens.getTokens()) {
    if (token.getType() == Token.EOF) {
        break;
    }
    System.out.printf("%-12s --> %s\n", CSVLexer.ruleNames[token.getType() - 1], token.getText());
}
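Indexing ruleNames by token type minus one only lines up while every token type maps one-to-one onto a lexer rule in declaration order. A sketch of a sturdier variant, assuming ANTLR 4.5+ where Lexer.getVocabulary() is available (import org.antlr.v4.runtime.Vocabulary):

Vocabulary vocab = lexer.getVocabulary();
for (Token token : tokens.getTokens()) {
    if (token.getType() == Token.EOF) {
        break;
    }
    // getDisplayName falls back gracefully when no symbolic name exists
    System.out.printf("%-12s --> %s\n",
            vocab.getDisplayName(token.getType()), token.getText());
}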
List tokenList = tokens.getTokens();
private List<Token> tokenizeMethod(String method) {
    JavaLexer lex = new JavaLexer(new ANTLRInputStream(method));
    CommonTokenStream tokStream = new CommonTokenStream(lex);
    tokStream.fill();
    return tokStream.getTokens();
}

/**
 * Returns the number of reserved words inside the given method, using lexical analysis
 * @param method The method text
 */
private int countReservedWords(String method) {
    int count = 0;
    for (Token t : tokenizeMethod(method)) {
        if (t.getType() <= JavaLexer.WHILE) {
            count++;
        }
    }
    return count;
}
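The t.getType() <= JavaLexer.WHILE test silently assumes all keyword token types are numbered at or below WHILE, which breaks if the grammar is regenerated with a different rule order. A sketch of a more explicit variant; the listed constants are assumptions about the grammar's token names and would need to be extended to the full keyword list:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Hypothetical explicit keyword set; extend with the remaining
// reserved words your JavaLexer grammar defines.
private static final Set<Integer> RESERVED_TYPES = new HashSet<>(Arrays.asList(
        JavaLexer.ABSTRACT, JavaLexer.CLASS, JavaLexer.IF, JavaLexer.WHILE));

private int countReservedWords(String method) {
    int count = 0;
    for (Token t : tokenizeMethod(method)) {
        if (RESERVED_TYPES.contains(t.getType())) {
            count++;
        }
    }
    return count;
}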
import org.antlr.runtime.*;

public class CSVLexerTest {
    public static void main(String[] args) throws Exception {
        // the input source
        String source = "val1, value2, value3, value3.2" + "\n" + "\"line\nbreak\",ABAbb,end";
        // create an instance of the lexer
        CSVLexer lexer = new CSVLexer(new ANTLRStringStream(source));
        // wrap a token-stream around the lexer and fill the tokens-list
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill();
        // traverse the tokens and print them to see if the correct tokens are created
        // tokens.toString();
        int n = 1;
        for (Object o : tokens.getTokens()) {
            CommonToken token = (CommonToken) o;
            System.out.println("token(" + n + ") = " + token.getText().replace("\n", "\\n"));
            n++;
        }
    }
}
import org.antlr.runtime.*;

public class Main {
    public static void main(String[] args) throws Exception {
        JavaCommentLexer lexer = new JavaCommentLexer(new ANTLRFileStream("Test.java"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        for (Object o : tokens.getTokens()) {
            CommonToken t = (CommonToken) o;
            if (t.getType() == JavaCommentLexer.SingleLineComment) {
                System.out.println("SingleLineComment :: " + t.getText().replace("\n", "\\n"));
            }
            if (t.getType() == JavaCommentLexer.MultiLineComment) {
                System.out.println("MultiLineComment :: " + t.getText().replace("\n", "\\n"));
            }
        }
    }
}
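Note that no fill() call appears here; whether getTokens() already returns the complete list depends on the ANTLR 3 version in use. The original 3.x CommonTokenStream buffered the whole token source on first access, while the BufferedTokenStream-based implementation introduced in 3.3 buffers lazily, so calling tokens.fill() before the loop is the safe choice on newer 3.x runtimes.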
FuzzyJavaLexer lexer = new FuzzyJavaLexer(in);
CommonTokenStream tokens = new CommonTokenStream(lexer);
for (Object obj : tokens.getTokens()) {
    Token token = (Token) obj;
    if (token.getType() == FuzzyJavaLexer.SingleLineComment) {
        // handle the comment token here, e.g. print token.getText()
    }
}
/**
 * Verify the input has been properly consumed
 */
protected void checkForValidInput(CommonTokenStream tokens, PrintStream ps2) {
    if (tokens.index() != tokens.size() - 1) {
        // At this point we need to check for redundant EOF tokens
        // which might have been added by the parser:
        List<? extends Token> endingTokens = tokens.getTokens(tokens.index(), tokens.size() - 1);
        for (Token endToken : endingTokens) {
            if (!"<EOF>".equals(endToken.getText())) {
                // writing to ps2 will mark the test as failed:
                ps2.print("Invalid input");
                return;
            }
        }
    }
}
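A hypothetical call site for the check above; ExprParser and startRule are placeholder names. The index/size comparison is only meaningful after the start rule has consumed its input:

// Sketch: run the parser, then verify nothing but EOF remains.
CommonTokenStream tokens = new CommonTokenStream(lexer);
ExprParser parser = new ExprParser(tokens);
parser.startRule();
checkForValidInput(tokens, System.err); // prints "Invalid input" if tokens remain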
private String getUnconsumedTokens(CommonTokenStream tokens) {
    // ensure we've buffered all tokens from the underlying TokenSource
    tokens.fill();
    if (tokens.index() == tokens.size() - 1) {
        return null;
    }
    StringBuilder nonEofEndingTokens = new StringBuilder();
    @SuppressWarnings("unchecked")
    List<Token> unconsumed = (List<Token>) tokens.getTokens(tokens.index(), tokens.size() - 1);
    for (Token endToken : unconsumed) {
        // Ignore <EOF> tokens as they might be inserted by the parser
        if (endToken.getType() != Token.EOF) {
            nonEofEndingTokens.append(endToken.getText());
        }
    }
    return nonEofEndingTokens.length() > 0 ? nonEofEndingTokens.toString() : null;
}
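A possible call site for this helper, failing loudly when the parser left input behind:

// Sketch: surface leftover input as an error after parsing.
String leftover = getUnconsumedTokens(tokens);
if (leftover != null) {
    throw new IllegalStateException("Unconsumed input: " + leftover);
}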
protected void parseFile(VirtualFile file, VirtualFile srcDir) throws Exception {
    if (file.getName().endsWith(".ceylon") && (sourceFiles.isEmpty() || sourceFiles.contains(file))) {
        //System.out.println("Parsing " + file.getName());
        CeylonLexer lexer = new CeylonLexer(new ANTLRInputStream(file.getInputStream(), getEncoding()));
        CommonTokenStream tokenStream = new CommonTokenStream(lexer);
        CeylonParser parser = new CeylonParser(tokenStream);
        Tree.CompilationUnit cu = parser.compilationUnit();
        PhasedUnit phasedUnit = new PhasedUnit(file, srcDir, cu,
                moduleSourceMapper.getCurrentPackage(), moduleManager, moduleSourceMapper,
                context, new ArrayList<Token>(tokenStream.getTokens()));
        addPhasedUnit(file, phasedUnit);
        List<LexError> lexerErrors = lexer.getErrors();
        for (LexError le : lexerErrors) {
            //System.out.println("Lexer error in " + file.getName() + ": " + le.getMessage());
            cu.addLexError(le);
        }
        lexerErrors.clear();
        List<ParseError> parserErrors = parser.getErrors();
        for (ParseError pe : parserErrors) {
            //System.out.println("Parser error in " + file.getName() + ": " + pe.getMessage());
            cu.addParseError(pe);
        }
        parserErrors.clear();
    }
}
bs.add(org.apache.uima.ruta.parser.RutaParser.LINE_COMMENT);
bs.add(org.apache.uima.ruta.parser.RutaParser.COMMENT);
List<CommonToken> comments = (List<CommonToken>) tokenStream.getTokens(0, tokenStream.size(), bs);
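For context, a sketch of the surrounding setup, assuming the ANTLR 3 runtime that UIMA Ruta builds on. The getTokens(start, stop, BitSet) overload returns only buffered tokens whose type is in the set, so the stream should be filled first (fill() is available on the BufferedTokenStream-based streams of ANTLR 3.3+):

// Sketch, assuming org.antlr.runtime (ANTLR 3).
CommonTokenStream tokenStream = new CommonTokenStream(lexer);
tokenStream.fill();
org.antlr.runtime.BitSet bs = new org.antlr.runtime.BitSet();
bs.add(org.apache.uima.ruta.parser.RutaParser.LINE_COMMENT);
bs.add(org.apache.uima.ruta.parser.RutaParser.COMMENT);
List<CommonToken> comments =
        (List<CommonToken>) tokenStream.getTokens(0, tokenStream.size() - 1, bs);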