// Buffer the entire token stream, then concatenate each token's custom string form and print it.
// NOTE(review): the `...` placeholder means this fragment is illustrative, not compilable as-is.
// NOTE(review): the cast assumes every token produced by the lexer is a YourCustomTokenType —
// a ClassCastException is thrown otherwise; confirm the lexer's TokenFactory guarantees this.
CommonTokenStream tokens = ... tokens.fill(); StringBuilder sb = new StringBuilder(); for (Token token : tokens.getTokens()) { sb.append(((YourCustomTokenType) token).toString()); } System.out.print(sb.toString());
/**
 * Returns how many buffered tokens are on {@code channel}; the EOF token is
 * counted at most once (iteration stops as soon as EOF is seen).
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int onChannelCount = 0;
    for (Token tok : tokens) {
        if (tok.getChannel() == channel) {
            onChannelCount++;
        }
        if (tok.getType() == Token.EOF) {
            break;
        }
    }
    return onChannelCount;
}
/**
 * Counts the tokens sitting on this stream's {@code channel}. EOF terminates
 * the scan, so it contributes to the count at most once.
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int matched = 0;
    for (Token current : tokens) {
        if (current.getChannel() == channel) {
            matched++;
        }
        if (current.getType() == Token.EOF) {
            break;
        }
    }
    return matched;
}
/**
 * Number of buffered tokens whose channel equals {@code channel}; stops at the
 * first EOF token so EOF is never counted more than once.
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int total = 0;
    int size = tokens.size();
    int i = 0;
    while (i < size) {
        Token tok = tokens.get(i);
        if (tok.getChannel() == channel) {
            total++;
        }
        if (tok.getType() == Token.EOF) {
            break;
        }
        i++;
    }
    return total;
}
/**
 * Tallies on-channel tokens in the (fully buffered) stream. The scan halts on
 * EOF, guaranteeing the EOF token is counted no more than once.
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int hits = 0;
    for (Token candidate : tokens) {
        boolean onChannel = candidate.getChannel() == channel;
        if (onChannel) {
            hits++;
        }
        boolean atEof = candidate.getType() == Token.EOF;
        if (atEof) {
            break;
        }
    }
    return hits;
}
/**
 * Returns the count of tokens on {@code channel}, buffering the whole stream
 * first. Iteration ends at EOF so that token is included at most once.
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int result = 0;
    for (int idx = 0, limit = tokens.size(); idx < limit; idx++) {
        Token entry = tokens.get(idx);
        if (entry.getChannel() == channel) {
            result += 1;
        }
        if (entry.getType() == Token.EOF) {
            return result;
        }
    }
    return result;
}
// Lex the literal input a,b,"c""c" with the generated CSVLexer, buffer every token,
// then print each token's rule name and text, stopping before the EOF token.
// NOTE(review): ruleNames[token.getType() - 1] assumes token types map 1:1 onto lexer rules in
// declaration order — that breaks once fragment rules or implicit literal tokens exist; ANTLR4's
// lexer.getVocabulary().getDisplayName(type) is the reliable lookup — confirm before relying on it.
CSVLexer lexer = new CSVLexer(new ANTLRInputStream("a,b,\"c\"\"c\"")); CommonTokenStream tokens = new CommonTokenStream(lexer); tokens.fill(); for (Token token : tokens.getTokens()) { if (token.getType() == Token.EOF) { break; } System.out.printf("%-12s --> %s\n", CSVLexer.ruleNames[token.getType() - 1], token.getText()); }
/**
 * Lexes the given method source with the generated JavaLexer and returns every
 * buffered token, including the trailing EOF token that {@code fill()} appends.
 *
 * @param method the Java source text of a single method
 * @return all tokens produced by the lexer, in order
 */
private List<Token> tokenizeMethod(String method) {
    JavaLexer lex = new JavaLexer(new ANTLRInputStream(method));
    CommonTokenStream tokStream = new CommonTokenStream(lex);
    tokStream.fill();
    return tokStream.getTokens();
}

/**
 * Returns the number of reserved words inside the given method, using lexical analysis.
 * NOTE(review): assumes the generated keyword token types occupy the contiguous low range
 * ending at JavaLexer.WHILE (i.e. keywords are declared first in the grammar) — confirm
 * against the generated JavaLexer constants.
 *
 * @param method the Java source text of a single method
 * @return the count of reserved-word tokens found
 */
private int countReservedWords(String method) {
    int count = 0;
    for (Token t : tokenizeMethod(method)) {
        // Token.EOF is -1, so without this guard the buffered EOF token always satisfies
        // "type <= JavaLexer.WHILE" and is miscounted as a reserved word (off-by-one bug).
        if (t.getType() != Token.EOF && t.getType() <= JavaLexer.WHILE) {
            count++;
        }
    }
    return count;
}
import org.antlr.runtime.*; public class CSVLexerTest { public static void main(String[] args) throws Exception { // the input source String source = "val1, value2, value3, value3.2" + "\n" + "\"line\nbreak\",ABAbb,end"; // create an instance of the lexer CSVLexer lexer = new CSVLexer(new ANTLRStringStream(source)); // wrap a token-stream around the lexer and fill the tokens-list CommonTokenStream tokens = new CommonTokenStream(lexer); tokens.fill(); // traverse the tokens and print them to see if the correct tokens are created // tokens.toString(); int n = 1; for (Object o : tokens.getTokens()) { CommonToken token = (CommonToken) o; System.out.println("token(" + n + ") = " + token.getText().replace("\n", "\\n")); n++; } } }
/**
 * Evaluates a GFI (GetFeatureInfo) templating file against the given feature collection
 * and writes the rendered result to the response stream as UTF-8 text.
 *
 * @param response stream the rendered template is written to; its writer is closed on exit
 * @param fiFile path to the template file, or null to use the built-in "html.gfi" resource
 * @param col the features to render
 * @param geometries whether geometry properties are included in the evaluation
 * @throws IOException declared for callers; template errors are logged, not rethrown
 */
public static void runTemplate( OutputStream response, String fiFile, FeatureCollection col, boolean geometries )
                        throws IOException {
    PrintWriter out = new PrintWriter( new OutputStreamWriter( response, "UTF-8" ) );
    // Declared outside the try so the finally block can close it — the original
    // leaked this stream (classpath resource or FileInputStream) on every call.
    InputStream in = null;
    try {
        if ( fiFile == null ) {
            in = FeatureInfoManager.class.getResourceAsStream( "html.gfi" );
        } else {
            in = new FileInputStream( fiFile );
        }
        CharStream input = new ANTLRInputStream( in );
        Templating2Lexer lexer = new Templating2Lexer( input );
        CommonTokenStream cts = new CommonTokenStream( lexer );
        cts.fill();
        Templating2Parser parser = new Templating2Parser( cts );
        // NOTE(review): unchecked cast — parser.definitions() is assumed to build a HashMap.
        HashMap<String, Object> defs = (HashMap) parser.definitions();
        StringBuilder sb = new StringBuilder();
        // "start" with the wildcard property list is the template's entry point.
        new PropertyTemplateCall( "start", singletonList( "*" ), false ).eval( sb, defs, col, geometries );
        out.println( sb.toString() );
    } catch ( Throwable e ) {
        // Best-effort rendering: template failures are logged and swallowed so the
        // response is still closed cleanly.
        if ( fiFile == null ) {
            LOG.error( "Could not load internal template for GFI response." );
        } else {
            LOG.error( "Could not load template '{}' for GFI response.", fiFile );
        }
        LOG.trace( "Stack trace:", e );
    } finally {
        if ( in != null ) {
            try {
                in.close();
            } catch ( IOException ignored ) {
                // best-effort close; the response writer is still closed below
            }
        }
        out.close();
    }
}
/**
 * Returns the concatenated text of all tokens the parser left unconsumed, or null when
 * the stream was fully consumed (or only synthetic EOF tokens remain).
 */
private String getUnconsumedTokens(CommonTokenStream tokens) {
    // ensure we've buffered all tokens from the underlying TokenSource
    tokens.fill();
    // index() pointing at the last buffered token means everything up to EOF was consumed
    if ( tokens.index() == tokens.size() - 1 ) {
        return null;
    }
    StringBuilder nonEofEndingTokens = new StringBuilder();
    @SuppressWarnings("unchecked")
    List<Token> unconsumed = (List<Token>) tokens.getTokens( tokens.index(), tokens.size() - 1 );
    for ( Token endToken : unconsumed ) {
        // Ignore <EOF> tokens as they might be inserted by the parser
        if ( endToken.getType() != Token.EOF ) {
            nonEofEndingTokens.append( endToken.getText() );
        }
    }
    // all-EOF leftovers yield an empty buffer, which is reported as "nothing unconsumed"
    return nonEofEndingTokens.length() > 0 ? nonEofEndingTokens.toString() : null;
}
// NOTE(review): this brace closes an enclosing class whose declaration is outside this excerpt.
}
// Buffer every remaining token from the underlying source, then take a snapshot of the list.
tokenStream.fill();
// Parameterized type instead of the original raw List — avoids unchecked casts downstream.
List<Token> tokens = tokenStream.getTokens();