public static Token previousRealToken(CommonTokenStream tokens, int i) { int size = tokens.size(); i--; // search before current i token if ( i>=size || i<0 ) return null; Token t = tokens.get(i); while ( t.getChannel()==Token.HIDDEN_CHANNEL ) { i--; if ( i<0 ) return null; t = tokens.get(i); } return t; }
/** Get start/stop of an entire rule including semi and then clean up * WS at end. */ public static String getRuleText(CommonTokenStream tokens, ParserRuleContext ruleDefNode) { Token stop = ruleDefNode.getStop(); Token semi = stop; TerminalNode colonNode = ruleDefNode.getToken(ANTLRv4Parser.COLON, 0); Token colon = colonNode.getSymbol(); Token beforeSemi = tokens.get(stop.getTokenIndex()-1); Token afterColon = tokens.get(colon.getTokenIndex()+1); // trim whitespace/comments before / after rule text List<Token> ignoreBefore = tokens.getHiddenTokensToRight(colon.getTokenIndex()); List<Token> ignoreAfter = tokens.getHiddenTokensToLeft(semi.getTokenIndex()); Token textStart = afterColon; Token textStop = beforeSemi; if ( ignoreBefore!=null ) { Token lastWSAfterColon = ignoreBefore.get(ignoreBefore.size()-1); textStart = tokens.get(lastWSAfterColon.getTokenIndex()+1); } if ( ignoreAfter!=null ) { int firstWSAtEndOfRule = ignoreAfter.get(0).getTokenIndex()-1; textStop = tokens.get(firstWSAtEndOfRule); // stop before 1st ignore token at end } return tokens.getText(textStart, textStop); }
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Fetches the current token-index range, then walks the token stream; the loop
// stops as soon as the EOF token is reached.
UniformPair<Integer> current = ranges.get(rangeIndex); for (int i = 0; i < tokens.size(); i++) { Token t = tokens.get(i); if (t.getType() == EsperEPL2GrammarLexer.EOF) { break;
public static Token nextRealToken(CommonTokenStream tokens, int i) { int n = tokens.size(); i++; // search after current i token if ( i>=n || i<0 ) return null; Token t = tokens.get(i); while ( t.getChannel()==Token.HIDDEN_CHANNEL ) { if ( t.getType()==Token.EOF ) { TokenSource tokenSource = tokens.getTokenSource(); if ( tokenSource==null ) { return new CommonToken(Token.EOF, "EOF"); } TokenFactory<?> tokenFactory = tokenSource.getTokenFactory(); if ( tokenFactory==null ) { return new CommonToken(Token.EOF, "EOF"); } return tokenFactory.create(Token.EOF, "EOF"); } i++; if ( i>=n ) return null; // just in case no EOF t = tokens.get(i); } return t; }
private static Pair<String, Integer> findScriptName(int start, CommonTokenStream tokens) { String lastIdent = null; int lastIdentIndex = 0; for (int i = start; i < tokens.size(); i++) { if (tokens.get(i).getType() == EsperEPL2GrammarParser.IDENT) { lastIdent = tokens.get(i).getText(); lastIdentIndex = i; } if (tokens.get(i).getType() == EsperEPL2GrammarParser.LPAREN) { break; } // find beginning of script, ignore brackets if (tokens.get(i).getType() == EsperEPL2GrammarParser.LBRACK && tokens.get(i + 1).getType() != EsperEPL2GrammarParser.RBRACK) { break; } } if (lastIdent == null) { throw new IllegalStateException("Failed to parse expression name"); } return new Pair<String, Integer>(lastIdent, lastIdentIndex); }
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Fetches the current token-index range, then walks the token stream; the loop
// stops as soon as the EOF token is reached.
UniformPair<Integer> current = ranges.get(rangeIndex); for (int i = 0; i < tokens.size(); i++) { Token t = tokens.get(i); if (t.getType() == EsperEPL2GrammarLexer.EOF) { break;
private static Pair<String, Integer> findScriptName(int start, CommonTokenStream tokens) { String lastIdent = null; int lastIdentIndex = 0; for (int i = start; i < tokens.size(); i++) { if (tokens.get(i).getType() == EsperEPL2GrammarParser.IDENT) { lastIdent = tokens.get(i).getText(); lastIdentIndex = i; } if (tokens.get(i).getType() == EsperEPL2GrammarParser.LPAREN) { break; } // find beginning of script, ignore brackets if (tokens.get(i).getType() == EsperEPL2GrammarParser.LBRACK && tokens.get(i + 1).getType() != EsperEPL2GrammarParser.RBRACK) { break; } } if (lastIdent == null) { throw new IllegalStateException("Failed to parse expression name"); } return new Pair<String, Integer>(lastIdent, lastIdentIndex); }
/**
 * Returns the index of the first token at or after {@code startIndex} whose
 * type equals {@code tokenTypeSearch}, or -1 if no such token exists.
 */
private static int findStartTokenScript(int startIndex, CommonTokenStream tokens, int tokenTypeSearch) {
    // The original kept a dead local ("found") that was initialized to -1 and
    // never reassigned; returning -1 directly is equivalent and clearer.
    for (int i = startIndex; i < tokens.size(); i++) {
        if (tokens.get(i).getType() == tokenTypeSearch) {
            return i;
        }
    }
    return -1;
}
/**
 * Returns the nearest token before index {@code i} that is neither on
 * channel 99 (NOTE(review): magic number — presumably this grammar's
 * comment channel, confirm against the lexer) nor a whitespace token.
 *
 * @return the preceding significant token, or {@code null} if none exists
 */
private static Token getTokenBefore(int i, CommonTokenStream tokens) {
    for (int pos = i - 1; pos >= 0; pos--) {
        Token candidate = tokens.get(pos);
        boolean skip = candidate.getChannel() == 99
                || candidate.getType() == EsperEPL2GrammarLexer.WS;
        if (!skip) {
            return candidate;
        }
    }
    return null;
}
/**
 * Collects all default-channel tokens from the stream, excluding EOF.
 *
 * @param tokens buffered token stream to read from
 * @return the on-channel tokens in stream order (possibly empty)
 */
public static List<Token> getRealTokens(CommonTokenStream tokens) {
    List<Token> real = new ArrayList<>();
    for (int idx = 0; idx < tokens.size(); idx++) {
        Token tok = tokens.get(idx);
        boolean isReal = tok.getType() != Token.EOF
                && tok.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
        if (isReal) {
            real.add(tok);
        }
    }
    return real;
}
/**
 * Linear search for the first token of the requested type, starting at
 * {@code startIndex}.
 *
 * @return the matching token's index, or -1 when the type never occurs
 */
private static int findStartTokenScript(int startIndex, CommonTokenStream tokens, int tokenTypeSearch) {
    int found = -1;
    int i = startIndex;
    while (i < tokens.size()) {
        if (tokens.get(i).getType() == tokenTypeSearch) {
            found = i;
            break;
        }
        i++;
    }
    return found;
}
/**
 * Walks backwards from index {@code i} and returns the first token that is
 * not on channel 99 (NOTE(review): hard-coded channel — presumably comments,
 * verify against the lexer definition) and not whitespace.
 *
 * @return the preceding significant token, or {@code null} when none exists
 */
private static Token getTokenBefore(int i, CommonTokenStream tokens) {
    int position = i - 1;
    while (position >= 0) {
        Token current = tokens.get(position);
        boolean ignorable = current.getChannel() == 99
                || current.getType() == EsperEPL2GrammarLexer.WS;
        if (!ignorable) {
            return current;
        }
        position--;
    }
    return null;
}
/** Search backwards from tokIndex into 'tokens' stream and get all on-channel * tokens on previous line with respect to token at tokIndex. * return empty list if none found. First token in returned list is * the first token on the line. */ public static List<Token> getTokensOnPreviousLine(CommonTokenStream tokens, int tokIndex, int curLine) { // first find previous line by looking for real token on line < tokens.get(i) int prevLine = 0; for (int i = tokIndex-1; i>=0; i--) { Token t = tokens.get(i); if ( t.getChannel()==Token.DEFAULT_CHANNEL && t.getLine()<curLine ) { prevLine = t.getLine(); tokIndex = i; // start collecting at this index break; } } // Now collect the on-channel real tokens for this line List<Token> online = new ArrayList<>(); for (int i = tokIndex; i>=0; i--) { Token t = tokens.get(i); if ( t.getChannel()==Token.DEFAULT_CHANNEL ) { if ( t.getLine()<prevLine ) break; // found last token on that previous line online.add(t); } } Collections.reverse(online); return online; }
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Scans for a token of tokenTypeSearch, then looks ahead for the next token on
// channel 0; when that token is one of the allowed "after script" types, the
// match index is remembered in indexLast.
int found = -1; for (int i = startIndex; i < tokens.size(); i++) { if (tokens.get(i).getType() == tokenTypeSearch) { for (int j = i + 1; j < tokens.size(); j++) { Token next = tokens.get(j); if (next.getChannel() == 0) { if (afterScriptTokens.contains(next.getType())) { if (tokens.get(i).getType() == tokenTypeSearch) { indexLast = i;
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Scans for a token of tokenTypeSearch, then looks ahead for the next token on
// channel 0; when that token is one of the allowed "after script" types, the
// match index is remembered in indexLast.
int found = -1; for (int i = startIndex; i < tokens.size(); i++) { if (tokens.get(i).getType() == tokenTypeSearch) { for (int j = i + 1; j < tokens.size(); j++) { Token next = tokens.get(j); if (next.getChannel() == 0) { if (afterScriptTokens.contains(next.getType())) { if (tokens.get(i).getType() == tokenTypeSearch) { indexLast = i;
/**
 * Extracts every script body declared via EXPRESSIONDECL from the token
 * stream, records its bracketed token-index range, and returns the rewritten
 * EPL text alongside the raw script texts.
 */
private static ScriptResult rewriteTokensScript(CommonTokenStream tokens) {
    List<String> scripts = new ArrayList<String>();
    List<UniformPair<Integer>> scriptTokenIndexRanges = new ArrayList<UniformPair<Integer>>();
    for (int i = 0; i < tokens.size(); i++) {
        if (tokens.get(i).getType() != EsperEPL2GrammarParser.EXPRESSIONDECL) {
            continue;
        }
        // Distinguish "create expression ..." from a plain expression clause.
        Token tokenBefore = getTokenBefore(i, tokens);
        boolean isCreateExpressionClause = tokenBefore != null
                && tokenBefore.getType() == EsperEPL2GrammarParser.CREATE;
        Pair<String, Integer> nameAndNameStart = findScriptName(i + 1, tokens);
        int startIndex = findStartTokenScript(nameAndNameStart.getSecond(), tokens,
                EsperEPL2GrammarParser.LBRACK);
        if (startIndex == -1) {
            continue;
        }
        int endIndex = findEndTokenScript(startIndex + 1, tokens,
                EsperEPL2GrammarParser.RBRACK,
                EsperEPL2GrammarParser.getAfterScriptTokens(), !isCreateExpressionClause);
        if (endIndex == -1) {
            continue;
        }
        // Concatenate the raw token text strictly between the brackets.
        StringBuilder script = new StringBuilder();
        for (int j = startIndex + 1; j < endIndex; j++) {
            script.append(tokens.get(j).getText());
        }
        scripts.add(script.toString());
        scriptTokenIndexRanges.add(new UniformPair<Integer>(startIndex, endIndex));
    }
    String rewrittenEPL = rewriteScripts(scriptTokenIndexRanges, tokens);
    return new ScriptResult(rewrittenEPL, scripts);
}
/**
 * Walks the token stream looking for EXPRESSIONDECL clauses; for each one
 * with a bracketed script body, collects the body text and its token-index
 * range, then hands the ranges to {@code rewriteScripts} to produce the
 * rewritten EPL.
 */
private static ScriptResult rewriteTokensScript(CommonTokenStream tokens) {
    List<String> scriptBodies = new ArrayList<String>();
    List<UniformPair<Integer>> bodyRanges = new ArrayList<UniformPair<Integer>>();
    for (int i = 0; i < tokens.size(); i++) {
        if (tokens.get(i).getType() == EsperEPL2GrammarParser.EXPRESSIONDECL) {
            // A preceding CREATE token marks a "create expression" clause.
            Token preceding = getTokenBefore(i, tokens);
            boolean createClause = preceding != null
                    && preceding.getType() == EsperEPL2GrammarParser.CREATE;
            Pair<String, Integer> scriptName = findScriptName(i + 1, tokens);
            int open = findStartTokenScript(scriptName.getSecond(), tokens,
                    EsperEPL2GrammarParser.LBRACK);
            if (open != -1) {
                int close = findEndTokenScript(open + 1, tokens,
                        EsperEPL2GrammarParser.RBRACK,
                        EsperEPL2GrammarParser.getAfterScriptTokens(), !createClause);
                if (close != -1) {
                    // Join the raw token text between the opening and closing bracket.
                    StringBuilder body = new StringBuilder();
                    for (int j = open + 1; j < close; j++) {
                        body.append(tokens.get(j).getText());
                    }
                    scriptBodies.add(body.toString());
                    bodyRanges.add(new UniformPair<Integer>(open, close));
                }
            }
        }
    }
    String rewrittenEPL = rewriteScripts(bodyRanges, tokens);
    return new ScriptResult(rewrittenEPL, scriptBodies);
}
private static boolean isContainsScriptExpression(CommonTokenStream tokens) { for (int i = 0; i < tokens.size(); i++) { if (tokens.get(i).getType() == EsperEPL2GrammarParser.EXPRESSIONDECL) { int startTokenLcurly = findStartTokenScript(i + 1, tokens, EsperEPL2GrammarParser.LCURLY); int startTokenLbrack = findStartTokenScript(i + 1, tokens, EsperEPL2GrammarParser.LBRACK); // Handle: // expression ABC { some[other] } // expression boolean js:doit(...) [ {} ] if (startTokenLbrack != -1 && (startTokenLcurly == -1 || startTokenLcurly > startTokenLbrack)) { return true; } } } return false; }
private static boolean isContainsScriptExpression(CommonTokenStream tokens) { for (int i = 0; i < tokens.size(); i++) { if (tokens.get(i).getType() == EsperEPL2GrammarParser.EXPRESSIONDECL) { int startTokenLcurly = findStartTokenScript(i + 1, tokens, EsperEPL2GrammarParser.LCURLY); int startTokenLbrack = findStartTokenScript(i + 1, tokens, EsperEPL2GrammarParser.LBRACK); // Handle: // expression ABC { some[other] } // expression boolean js:doit(...) [ {} ] if (startTokenLbrack != -1 && (startTokenLcurly == -1 || startTokenLcurly > startTokenLbrack)) { return true; } } } return false; }
/**
 * Lexes and parses SLang source text. Tokens on channel 1 are collected as
 * comments (NOTE(review): channel 1 is presumably the grammar's comment
 * channel — confirm against the lexer); all other tokens are converted to
 * SLang tokens (keyword / string-literal / other) before the parse tree is
 * visited to build the SLang AST.
 */
@Override
public Tree parse(String slangCode) {
    SLangLexer lexer = new SLangLexer(CharStreams.fromString(slangCode));
    CommonTokenStream antlrTokens = new CommonTokenStream(lexer);
    antlrTokens.fill();

    List<Comment> comments = new ArrayList<>();
    List<org.sonarsource.slang.api.Token> tokens = new ArrayList<>();
    for (int index = 0; index < antlrTokens.size(); index++) {
        Token token = antlrTokens.get(index);
        TextRange textRange = getSlangTextRange(token);
        if (token.getChannel() == 1) {
            comments.add(comment(token, textRange));
            continue;
        }
        Type type;
        if (KEYWORD_TOKEN_TYPES.contains(token.getType())) {
            type = Type.KEYWORD;
        } else if (token.getType() == SLangParser.StringLiteral) {
            type = Type.STRING_LITERAL;
        } else {
            type = Type.OTHER;
        }
        tokens.add(new TokenImpl(textRange, token.getText(), type));
    }

    SLangParser parser = new SLangParser(antlrTokens);
    parser.setErrorHandler(new ErrorStrategy());
    SLangParseTreeVisitor slangVisitor = new SLangParseTreeVisitor(comments, tokens);
    return slangVisitor.visit(parser.slangFile());
}