/**
 * Creates a {@link CharStream} from the given {@link Reader}, using
 * {@link IntStream#UNKNOWN_SOURCE_NAME} as the stream's source name.
 * Closes the reader before returning.
 *
 * @param r the reader to consume; it is closed by this call
 * @return a code-point stream over the reader's contents
 * @throws IOException if reading from {@code r} fails
 */
public static CodePointCharStream fromReader(Reader r) throws IOException {
    return fromReader(r, IntStream.UNKNOWN_SOURCE_NAME);
}
/**
 * Builds a {@link CharStream} over everything the supplied {@link Reader}
 * produces. Delegates to the two-argument overload with
 * {@link IntStream#UNKNOWN_SOURCE_NAME}. Closes the reader before returning.
 *
 * @param r reader to read fully and then close
 * @return the resulting {@link CodePointCharStream}
 * @throws IOException on read failure
 */
public static CodePointCharStream fromReader(Reader r) throws IOException {
    return fromReader(r, IntStream.UNKNOWN_SOURCE_NAME);
}
/**
 * Convenience overload: creates a {@link CharStream} from {@code r} with an
 * unknown source name ({@link IntStream#UNKNOWN_SOURCE_NAME}). The reader is
 * closed before this method returns.
 *
 * @param r the reader supplying the characters; closed by this call
 * @return a {@link CodePointCharStream} over the reader's contents
 * @throws IOException if the reader cannot be read
 */
public static CodePointCharStream fromReader(Reader r) throws IOException {
    return fromReader(r, IntStream.UNKNOWN_SOURCE_NAME);
}
/**
 * Parses configuration text from the given reader into {@link Properties}.
 *
 * @param r source of configuration text; consumed via an ANTLR char stream
 * @return the parsed properties
 * @throws ConfigException if parsing fails (raised by {@code runparsing})
 * @throws IOException     if the reader cannot be read
 */
public static Properties parse(Reader r) throws ConfigException, IOException {
    final Configuration configuration = new Configuration();
    return configuration.runparsing(CharStreams.fromReader(r));
}
/**
 * Lexes the given source text into a buffered token stream.
 * Default console error listeners are replaced by a
 * {@link DiagnosticErrorListener}.
 *
 * @param source the text to tokenize
 * @return a {@link CommonTokenStream} backed by a {@code CstrSpecLexer}
 * @throws IOException if the in-memory reader fails (not expected in practice)
 */
private static CommonTokenStream getTokens(String source) throws IOException {
    final CstrSpecLexer lexer =
            new CstrSpecLexer(CharStreams.fromReader(new StringReader(source)));
    lexer.removeErrorListeners();
    lexer.addErrorListener(new DiagnosticErrorListener());
    return new CommonTokenStream(lexer);
}
// NOTE(review): fragment — the enclosing method is outside this excerpt. Wraps
// `input` (a Reader declared elsewhere) in an ANTLR code-point stream, lexes it
// with VirtDataLexer, and buffers the tokens. CharStreams.fromReader can throw
// IOException — presumably handled or declared by the enclosing method; verify.
CodePointCharStream charStream = CharStreams.fromReader(input); VirtDataLexer lexer = new VirtDataLexer(charStream); CommonTokenStream tokens = new CommonTokenStream(lexer);
// NOTE(review): fragment, and a duplicate of an identical snippet in this file —
// consider extracting a shared helper. Converts the externally declared `input`
// reader into a char stream and builds the VirtDataLexer token pipeline.
// IOException from fromReader must be handled by the (unseen) enclosing code.
CodePointCharStream charStream = CharStreams.fromReader(input); VirtDataLexer lexer = new VirtDataLexer(charStream); CommonTokenStream tokens = new CommonTokenStream(lexer);
// NOTE(review): fragment — enclosing method not visible. Same pipeline shape as
// the VirtDataLexer snippets but for the Metagen grammar: reader -> code-point
// stream -> MetagenLexer -> buffered token stream. fromReader's IOException is
// presumably dealt with by the surrounding method; confirm.
CodePointCharStream charStream = CharStreams.fromReader(input); MetagenLexer lexer = new MetagenLexer(charStream); CommonTokenStream tokens = new CommonTokenStream(lexer);
/**
 * Builds an {@link ExpressionMatcher} for the given rule. When the rule carries
 * a non-blank expression it is parsed with the ANTLR expression grammar; lexer
 * syntax errors are logged via the overridden listener (ANTLR's default
 * recovery then continues), so a malformed expression does not throw here.
 * When the expression is blank, the matcher is created with a null context.
 *
 * @param rule the rule whose expression (possibly blank) should be compiled
 * @return a matcher wrapping the parsed expression context (or null context)
 * @throws java.io.UncheckedIOException if reading the expression fails
 *         (not expected for an in-memory StringReader)
 */
public static ExpressionMatcher createExpressionMatcher(Rule rule) {
    ExpressionParser.ExpressionContext ruleContext = null;
    if (StringUtils.isNotBlank(rule.getExpression())) {
        try {
            CharStream input = CharStreams.fromReader(new StringReader(rule.getExpression()));
            ExpressionLexer lexer = new ExpressionLexer(input);
            lexer.removeErrorListeners();
            lexer.addErrorListener(new ConsoleErrorListener() {
                @Override
                public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                        int line, int charPositionInLine, String msg, RecognitionException e) {
                    // Log instead of printing to stderr; include the offending input.
                    log.warn("line " + line + ":" + charPositionInLine + " " + msg);
                    log.warn(input.toString());
                }
            });
            ExpressionParser parser = new ExpressionParser(new CommonTokenStream(lexer));
            ruleContext = parser.expression();
        } catch (IOException e) {
            // A StringReader should never fail; if it somehow does, rethrow with the
            // IO-specific unchecked wrapper (still a RuntimeException, so existing
            // callers are unaffected) instead of a bare RuntimeException, preserving
            // the exception's category alongside its cause.
            throw new java.io.UncheckedIOException(e);
        }
    }
    return new ExpressionMatcher(ruleContext, rule);
}
public static final ParseTree getDumpFileParseTree(Reader reader) throws IOException { ANTLRErrorListener listener = new DescriptiveErrorListener(); DumpFileGrammarLexer lexer = new DumpFileGrammarLexer(CharStreams.fromReader(reader)); lexer.removeErrorListeners(); lexer.addErrorListener(listener); // Using SLL first proved not to be useful for the DF parser, so we directly parse with LL prediction mode CommonTokenStream tokens = new CommonTokenStream(lexer); DumpFileGrammarParser parser = new DumpFileGrammarParser(tokens); parser.removeErrorListeners(); parser.addErrorListener(listener); return parser.dump(); }
// NOTE(review): byte-for-byte duplicate of another getDumpFileParseTree definition
// in this source — consider deduplicating. Wires a DumpFileGrammarLexer and
// DumpFileGrammarParser over the reader, replacing the default console error
// listeners with a DescriptiveErrorListener on both, then parses the `dump`
// start rule using the default (LL) prediction mode.
public static final ParseTree getDumpFileParseTree(Reader reader) throws IOException { ANTLRErrorListener listener = new DescriptiveErrorListener(); DumpFileGrammarLexer lexer = new DumpFileGrammarLexer(CharStreams.fromReader(reader)); lexer.removeErrorListeners(); lexer.addErrorListener(listener); // Using SLL first proved not to be useful for the DF parser, so we directly parse with LL prediction mode
CommonTokenStream tokens = new CommonTokenStream(lexer); DumpFileGrammarParser parser = new DumpFileGrammarParser(tokens); parser.removeErrorListeners(); parser.addErrorListener(listener); return parser.dump(); }
// NOTE(review): fragment — the class continues past this excerpt (it ends at a
// dangling @Override). RegexMatch tests the head element of a path against a
// compiled regex predicate; if a successor matcher is present the tail of the
// path is delegated to it, otherwise the match succeeds only when the regex
// segment is the final path element. `match` is excluded from equals/hashCode
// because Pattern predicates have no value equality.
@EqualsAndHashCode(exclude = {"match"}) public static class RegexMatch implements IdentifierMatch { @Getter private final String regex; private final Predicate<String> match; @Getter private final Optional<IdentifierMatch> successor; public RegexMatch(String regex) { this(regex, Optional.empty()); } public RegexMatch(@NonNull String regex, @NonNull Optional<IdentifierMatch> successor) { this.regex = regex; this.match = Pattern.compile(regex).asPredicate(); this.successor = successor; } @Override public boolean match(List<String> path, SkipBacktrack skipper) { if (path.isEmpty()) return false; if (!match.test(path.get(0))) return false; return successor .map((succ) -> succ.match(path.subList(1, path.size()), skipper)) .orElse(path.size() == 1); } @Override
// NOTE(review): fragment — the method body continues past this excerpt. Sets up a
// ConfigTokenizer over the instance's reader_ and swaps the default console error
// listeners for a DescriptiveErrorListener before (presumably) parsing; verify the
// remainder of the method against the unseen continuation.
public Configuration configuration() throws IOException, ConfigurationException { final DescriptiveErrorListener error_listener = new DescriptiveErrorListener(); final ConfigTokenizer lexer = new ConfigTokenizer(CharStreams.fromReader(reader_)); lexer.removeErrorListeners(); lexer.addErrorListener(error_listener);
/**
 * Checks the XML document's indentation against the resource's
 * {@code indent_style} and {@code indent_size} properties. The check runs only
 * when both properties are set; when exactly one is set a warning is logged,
 * and when neither is set the resource is skipped.
 *
 * @param resource         the XML resource to check
 * @param properties       resolved properties for the resource
 * @param violationHandler sink for violations; also supplies the logger
 * @throws IOException if the resource cannot be opened or read
 */
@Override
public void process(Resource resource, ResourceProperties properties, ViolationHandler violationHandler)
        throws IOException {
    final Logger logger = violationHandler.getLogger();
    final IndentStyleValue style = properties.getValue(PropertyType.indent_style, null, false);
    final Integer size = properties.getValue(PropertyType.indent_size, null, false);
    if (logger.isTraceEnabled()) {
        logger.trace("Checking indent_style value '{}' in {}", style, resource);
        logger.trace("Checking indent_size value '{}' in {}", size, resource);
    }
    if (style != null && size != null) {
        // Both properties set: parse the document and walk it with the format listener.
        try (Reader in = resource.openReader()) {
            XmlParser parser = new XmlParser(
                    new CommonTokenStream(new XmlLexer(CharStreams.fromReader(in, resource.toString()))));
            ParseTree document = parser.document();
            new ParseTreeWalker().walk(
                    new FormatParserListener(this, resource, style, size.intValue(), violationHandler),
                    document);
        }
    } else if (style != null || size != null) {
        // Exactly one of the two is set — the check cannot run meaningfully.
        logger.warn(this.getClass().getName() + " expects both indent_style and indent_size to be set for file '{}'",
                resource);
    }
    // Neither property set: nothing to do.
}
// NOTE(review): fragment — the surrounding method and the `reader`/`error_listener`
// declarations are outside this excerpt. Builds an ExpressionLexer over the reader
// and routes its syntax errors to the descriptive listener instead of the console.
final ExpressionLexer lexer = new ExpressionLexer(CharStreams.fromReader(reader)); lexer.removeErrorListeners(); lexer.addErrorListener(error_listener);
private void read(Graph<V, E> graph, Reader input, CSVBaseListener listener) throws ImportException { try { ThrowingErrorListener errorListener = new ThrowingErrorListener(); // create lexer CSVLexer lexer = new CSVLexer(CharStreams.fromReader(input)); lexer.setSep(delimiter); lexer.removeErrorListeners(); lexer.addErrorListener(errorListener); // create parser CSVParser parser = new CSVParser(new CommonTokenStream(lexer)); parser.removeErrorListeners(); parser.addErrorListener(errorListener); // Specify our entry point CSVParser.FileContext graphContext = parser.file(); // Walk it and attach our listener ParseTreeWalker walker = new ParseTreeWalker(); walker.walk(listener, graphContext); } catch (IOException e) { throw new ImportException("Failed to import CSV graph: " + e.getMessage(), e); } catch (ParseCancellationException pe) { throw new ImportException("Failed to import CSV graph: " + pe.getMessage(), pe); } catch (IllegalArgumentException iae) { throw new ImportException("Failed to import CSV graph: " + iae.getMessage(), iae); } }
// NOTE(review): fragment — `input` and `errorListener` are declared outside this
// excerpt. Lexes a GML stream from the reader and replaces the default console
// error listeners with the surrounding method's listener.
GmlLexer lexer = new GmlLexer(CharStreams.fromReader(input)); lexer.removeErrorListeners(); lexer.addErrorListener(errorListener);
// NOTE(review): fragment — the opening `try` and the declarations of `charStream`
// and `multiSourceReader` are outside this excerpt. Materializes the multi-source
// reader as a char stream; a read failure is rethrown as UncheckedIOException,
// apparently because the caller offers no checked-exception path here.
charStream = CharStreams.fromReader(multiSourceReader); } catch (IOException e) { throw new UncheckedIOException(e);
// NOTE(review): fragment — the try block continues past this excerpt (its catch
// clauses are not visible). Creates a collecting SyntaxErrorListener and reads
// the reader fully into a code-point stream for subsequent lexing.
try { SyntaxErrorListener error = new SyntaxErrorListener(); CodePointCharStream stream = CharStreams.fromReader(reader);
// NOTE(review): fragment — a single assignment from a larger constructor/method.
// Builds the char stream with an explicit source name; presumably `name` is used
// by ANTLR when reporting errors for this stream — confirm against the caller.
this.stream = CharStreams.fromReader(reader, name);