/** * Parses multiple VCALENDARs from the specified stream tokeniser. * * @param tokeniser * @param handler * @throws IOException * @throws ParseException * @throws URISyntaxException * @throws ParserException */ private void parseCalendarList(final StreamTokenizer tokeniser, Reader in, final ContentHandler handler) throws IOException, ParseException, URISyntaxException, ParserException { // BEGIN:VCALENDAR int ntok = assertToken(tokeniser, in, Calendar.BEGIN, false, true); while (ntok != StreamTokenizer.TT_EOF) { parseCalendar(tokeniser, in, handler); ntok = absorbWhitespace(tokeniser, in, true); } }
/**
 * {@inheritDoc}
 * <p>
 * RFC 5545 defines UTF-8 as the charset for iCalendar streams, so the byte
 * stream is decoded explicitly as UTF-8 rather than with the platform-default
 * charset, which previously made parsing environment-dependent.
 *
 * @param in the raw iCalendar byte stream
 * @param handler receives parse events
 * @throws IOException when the stream cannot be read
 * @throws ParserException when the stream violates the iCalendar grammar
 */
public final void parse(final InputStream in, final ContentHandler handler) throws IOException, ParserException {
    parse(new InputStreamReader(in, java.nio.charset.StandardCharsets.UTF_8), handler);
}
/**
 * {@inheritDoc}
 *
 * @return a fresh {@link CalendarParserImpl} on every invocation
 */
public CalendarParser createParser() {
    return new CalendarParserImpl();
}
}
skipNewLines(tokeniser, in, token); sval = getSvalIgnoringBom(tokeniser, in, token); } else { assertToken(tokeniser, in, StreamTokenizer.TT_WORD); sval = tokeniser.sval; throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in)); throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in));
/**
 * Asserts that the next token in the stream matches the specified token type.
 *
 * @param tokeniser stream tokeniser to perform assertion on
 * @param in the underlying reader (used for line-number reporting)
 * @param token expected token type
 * @throws IOException when unable to read from stream
 * @throws ParserException when the next token in the stream does not match the expected token
 */
private void assertToken(final StreamTokenizer tokeniser, Reader in, final int token)
        throws IOException, ParserException {
    final int actual = nextToken(tokeniser, in);
    if (actual != token) {
        throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, tokeniser.ttype),
                getLineNumber(tokeniser, in));
    }
    if (log.isDebugEnabled()) {
        log.debug("[" + token + "]");
    }
}
/**
 * Asserts that the next word token in the stream matches the specified text,
 * using a case-sensitive comparison.
 *
 * @param tokeniser stream tokeniser to perform assertion on
 * @param in the underlying reader (used for line-number reporting)
 * @param token expected token text
 * @throws IOException when unable to read from stream
 * @throws ParserException when the next token does not match the expected token
 */
private void assertToken(final StreamTokenizer tokeniser, Reader in, final String token)
        throws IOException, ParserException {
    // Delegate to the variant that takes an explicit case-sensitivity flag.
    assertToken(tokeniser, in, token, false);
}
/** * Asserts that the next token in the stream matches the specified token. * @param tokeniser stream tokeniser to perform assertion on * @param token expected token * @throws IOException when unable to read from stream * @throws ParserException when next token in the stream does not match the expected token */ private void assertToken(final StreamTokenizer tokeniser, Reader in, final String token, final boolean ignoreCase) throws IOException, ParserException { // ensure next token is a word token.. assertToken(tokeniser, in, StreamTokenizer.TT_WORD); if (ignoreCase) { if (!token.equalsIgnoreCase(tokeniser.sval)) { throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, new Object[] { token, tokeniser.sval, }), getLineNumber(tokeniser, in)); } } else if (!token.equals(tokeniser.sval)) { throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, new Object[] { token, tokeniser.sval, }), getLineNumber(tokeniser, in)); } if (log.isDebugEnabled()) { log.debug("[" + token + "]"); } }
/**
 * {@inheritDoc}
 */
public final void parse(final Reader in, final ContentHandler handler) throws IOException, ParserException {
    final StreamTokenizer tokeniser = new StreamTokenizer(in);
    try {
        // Configure tokeniser syntax for iCalendar content lines.
        tokeniser.resetSyntax();
        tokeniser.wordChars(WORD_CHAR_START, WORD_CHAR_END);
        tokeniser.whitespaceChars(WHITESPACE_CHAR_START, WHITESPACE_CHAR_END);
        tokeniser.ordinaryChar(':');
        tokeniser.ordinaryChar(';');
        tokeniser.ordinaryChar('=');
        tokeniser.ordinaryChar('\t');
        tokeniser.eolIsSignificant(true);
        tokeniser.whitespaceChars(0, 0);
        tokeniser.quoteChar('"');

        parseCalendarList(tokeniser, in, handler);
    } catch (IOException e) {
        // I/O failures propagate unchanged.
        throw e;
    } catch (ParseException | URISyntaxException | RuntimeException e) {
        if (e instanceof ParserException) {
            // Already carries parser context; rethrow as-is.
            throw (ParserException) e;
        }
        // Wrap anything else with the current line number for diagnostics.
        throw new ParserException(e.getMessage(), getLineNumber(tokeniser, in), e);
    }
}
/**
 * Reads the next token from the tokeniser.
 *
 * @param tokeniser the tokeniser to advance
 * @param in the underlying reader (used for line-number reporting)
 * @param ignoreEOF when false, reaching end-of-file raises an error
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from the stream
 * @throws ParserException when EOF is reached and {@code ignoreEOF} is false
 */
private int nextToken(StreamTokenizer tokeniser, Reader in, boolean ignoreEOF) throws IOException, ParserException {
    int token = tokeniser.nextToken();
    if (!ignoreEOF && token == StreamTokenizer.TT_EOF) {
        throw new ParserException("Unexpected end of file", getLineNumber(tokeniser, in));
    }
    return token;
}
}
/**
 * Reads the next token from the tokeniser, treating end-of-file as an error.
 *
 * @param tokeniser the tokeniser to advance
 * @param in the underlying reader (used for line-number reporting)
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from the stream
 * @throws ParserException when end-of-file is reached
 */
private int nextToken(StreamTokenizer tokeniser, Reader in) throws IOException, ParserException {
    // EOF is never acceptable for this variant.
    return nextToken(tokeniser, in, false);
}
int ntok; if(isBeginToken) { ntok = skipNewLines(tokeniser, in, token); sval = getSvalIgnoringBom(tokeniser, in, token); } else { ntok = assertToken(tokeniser, in, StreamTokenizer.TT_WORD); sval = tokeniser.sval; throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in)); throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in));
/**
 * Asserts that the next token in the stream matches the specified token type.
 *
 * @param tokeniser stream tokeniser to perform assertion on
 * @param in the underlying reader (used for line-number reporting)
 * @param token expected token type
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from stream
 * @throws ParserException when the next token in the stream does not match the expected token
 */
private int assertToken(final StreamTokenizer tokeniser, Reader in, final int token)
        throws IOException, ParserException {
    final int ntok = nextToken(tokeniser, in);
    if (token != ntok) {
        throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, tokeniser.ttype),
                getLineNumber(tokeniser, in));
    }
    if (log.isDebugEnabled()) {
        log.debug("[" + token + "]");
    }
    return ntok;
}
/**
 * Asserts that the next word token in the stream matches the specified text,
 * using a case-sensitive comparison.
 *
 * @param tokeniser stream tokeniser to perform assertion on
 * @param in the underlying reader (used for line-number reporting)
 * @param token expected token text
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from stream
 * @throws ParserException when the next token does not match the expected token
 */
private int assertToken(final StreamTokenizer tokeniser, Reader in, final String token)
        throws IOException, ParserException {
    // Case-sensitive match; the token is not a BEGIN marker.
    return assertToken(tokeniser, in, token, false, false);
}
assertToken(tokeniser, in, Calendar.BEGIN, false, true); assertToken(tokeniser, in, ':'); assertToken(tokeniser, in, Calendar.VCALENDAR, true, false); assertToken(tokeniser, in, StreamTokenizer.TT_EOL); assertToken(tokeniser, in, ':'); assertToken(tokeniser, in, Calendar.VCALENDAR, true, false); throw (ParserException) e; } else { throw new ParserException(e.getMessage(), getLineNumber(tokeniser, in), e);
/**
 * {@inheritDoc}
 */
public final void parse(final Reader in, final ContentHandler handler) throws IOException, ParserException {
    final StreamTokenizer tokeniser = new StreamTokenizer(in);
    try {
        // Configure tokeniser syntax for iCalendar content lines.
        tokeniser.resetSyntax();
        tokeniser.wordChars(WORD_CHAR_START, WORD_CHAR_END);
        tokeniser.whitespaceChars(WHITESPACE_CHAR_START, WHITESPACE_CHAR_END);
        tokeniser.ordinaryChar(':');
        tokeniser.ordinaryChar(';');
        tokeniser.ordinaryChar('=');
        tokeniser.ordinaryChar('\t');
        tokeniser.eolIsSignificant(true);
        tokeniser.whitespaceChars(0, 0);
        tokeniser.quoteChar('"');

        parseCalendarList(tokeniser, in, handler);
    } catch (IOException e) {
        // I/O failures propagate unchanged.
        throw e;
    } catch (ParseException | URISyntaxException | RuntimeException e) {
        if (e instanceof ParserException) {
            // Already carries parser context; rethrow as-is.
            throw (ParserException) e;
        }
        // Wrap anything else with the current line number for diagnostics.
        throw new ParserException(e.getMessage(), getLineNumber(tokeniser, in), e);
    }
}
/**
 * Reads the next token from the tokeniser.
 *
 * @param tokeniser the tokeniser to advance
 * @param in the underlying reader (used for line-number reporting)
 * @param ignoreEOF when false, reaching end-of-file raises an error
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from the stream
 * @throws ParserException when EOF is reached and {@code ignoreEOF} is false
 */
private int nextToken(StreamTokenizer tokeniser, Reader in, boolean ignoreEOF) throws IOException, ParserException {
    int token = tokeniser.nextToken();
    if (!ignoreEOF && token == StreamTokenizer.TT_EOF) {
        throw new ParserException("Unexpected end of file", getLineNumber(tokeniser, in));
    }
    return token;
}
}
/**
 * Reads the next token from the tokeniser, treating end-of-file as an error.
 *
 * @param tokeniser the tokeniser to advance
 * @param in the underlying reader (used for line-number reporting)
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from the stream
 * @throws ParserException when end-of-file is reached
 */
private int nextToken(StreamTokenizer tokeniser, Reader in) throws IOException, ParserException {
    // EOF is never acceptable for this variant.
    return nextToken(tokeniser, in, false);
}
int ntok; if(isBeginToken) { ntok = skipNewLines(tokeniser, in, token); sval = getSvalIgnoringBom(tokeniser, in, token); } else { ntok = assertToken(tokeniser, in, StreamTokenizer.TT_WORD); sval = tokeniser.sval; throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in)); throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, sval), getLineNumber(tokeniser, in));
/** * Parses multiple VCALENDARs from the specified stream tokeniser. * * @param tokeniser * @param handler * @throws IOException * @throws ParseException * @throws URISyntaxException * @throws ParserException */ private void parseCalendarList(final StreamTokenizer tokeniser, Reader in, final ContentHandler handler) throws IOException, ParseException, URISyntaxException, ParserException { // BEGIN:VCALENDAR int ntok = assertToken(tokeniser, in, Calendar.BEGIN, false, true); while (ntok != StreamTokenizer.TT_EOF) { parseCalendar(tokeniser, in, handler); ntok = absorbWhitespace(tokeniser, in, true); } }
/**
 * Asserts that the next token in the stream matches the specified token type.
 *
 * @param tokeniser stream tokeniser to perform assertion on
 * @param in the underlying reader (used for line-number reporting)
 * @param token expected token type
 * @return int value of the ttype field of the tokeniser
 * @throws IOException when unable to read from stream
 * @throws ParserException when the next token in the stream does not match the expected token
 */
private int assertToken(final StreamTokenizer tokeniser, Reader in, final int token)
        throws IOException, ParserException {
    final int ntok = nextToken(tokeniser, in);
    if (token != ntok) {
        throw new ParserException(MessageFormat.format(UNEXPECTED_TOKEN_MESSAGE, token, tokeniser.ttype),
                getLineNumber(tokeniser, in));
    }
    if (log.isDebugEnabled()) {
        log.debug("[" + token + "]");
    }
    return ntok;
}