/** A {@link CharSequenceReader} must advertise support for mark/reset. */
@Test
public void testMarkSupported() throws Exception {
    final Reader sequenceReader = new CharSequenceReader("FooBar");
    assertTrue(sequenceReader.markSupported());
    sequenceReader.close();
}
/** Reads the sequence one character at a time and checks EOF is sticky. */
@Test
public void testRead() throws IOException {
    final Reader in = new CharSequenceReader("Foo");
    for (final char expected : "Foo".toCharArray()) {
        assertEquals(expected, in.read());
    }
    // Once the sequence is exhausted, read() keeps returning EOF.
    assertEquals(-1, in.read());
    assertEquals(-1, in.read());
    in.close();
}
/** Closing a {@link CharSequenceReader} rewinds it to the start of the sequence. */
@Test
public void testClose() throws IOException {
    final Reader in = new CharSequenceReader("FooBar");
    checkRead(in, "Foo");
    in.close();
    // After close() the same leading characters are readable again.
    checkRead(in, "Foo");
}
/** Exercises skip(): partial skips, skip past EOF, reset, and skip after close. */
@Test
public void testSkip() throws IOException {
    final Reader in = new CharSequenceReader("FooBar");
    assertEquals(3, in.skip(3));
    checkRead(in, "Bar");
    // Skipping at EOF reports -1.
    assertEquals(-1, in.skip(3));
    in.reset();
    assertEquals(2, in.skip(2));
    // Only 4 characters remain, so a request for 10 is truncated.
    assertEquals(4, in.skip(10));
    assertEquals(-1, in.skip(1));
    in.close();
    // close() rewinds the reader, so the full 6 characters can be skipped again.
    assertEquals(6, in.skip(20));
    assertEquals(-1, in.read());
}
/** Reads into sub-ranges of a char array and verifies untouched slots stay unset. */
@Test
public void testReadCharArrayPortion() throws IOException {
    final char[] buffer = new char[10];
    final Reader in = new CharSequenceReader("FooBar");
    // Fill the middle of the buffer first…
    assertEquals(3, in.read(buffer, 3, 3));
    checkArray(new char[] {NONE, NONE, NONE, 'F', 'o', 'o'}, buffer);
    // …then the front.
    assertEquals(3, in.read(buffer, 0, 3));
    checkArray(new char[] {'B', 'a', 'r', 'F', 'o', 'o', NONE}, buffer);
    assertEquals(-1, in.read(buffer));
    in.close();
}
/** Verifies mark()/reset() replay, including after the reader has been closed. */
@Test
public void testMark() throws IOException {
    final Reader in = new CharSequenceReader("FooBar");
    checkRead(in, "Foo");
    in.mark(0);
    checkRead(in, "Bar");
    // reset() returns to the mark, so "Bar" is readable a second time.
    in.reset();
    checkRead(in, "Bar");
    // close() rewinds to the start; reset() then returns to the beginning too.
    in.close();
    checkRead(in, "Foo");
    in.reset();
    checkRead(in, "Foo");
}
/** Drains the sequence through char-array reads of varying sizes. */
@Test
public void testReadCharArray() throws IOException {
    final Reader in = new CharSequenceReader("FooBar");
    char[] buffer = new char[2];
    assertEquals(2, in.read(buffer));
    checkArray(new char[] {'F', 'o'}, buffer);
    buffer = new char[3];
    assertEquals(3, in.read(buffer));
    checkArray(new char[] {'o', 'B', 'a'}, buffer);
    // Only one character left: a 3-slot buffer gets a partial fill.
    buffer = new char[3];
    assertEquals(1, in.read(buffer));
    checkArray(new char[] {'r', NONE, NONE}, buffer);
    assertEquals(-1, in.read(buffer));
    in.close();
}
/**
 * Returns a fresh reader over the captured standard-error output.
 *
 * @return a {@link Reader} backed by {@code stderrBuilder}
 */
@Override
public final Reader getErrorOutput() {
    return new CharSequenceReader(stderrBuilder);
}
/**
 * Returns a fresh reader over the captured standard output.
 *
 * @return a {@link Reader} backed by {@code stdoutBuilder}
 */
@Override
public final Reader getStandardOutput() {
    return new CharSequenceReader(stdoutBuilder);
}
/**
 * Returns a fresh reader over the captured standard output.
 *
 * @return a {@link Reader} backed by {@code stdoutBuilder}
 */
public final Reader getStandardOutput() {
    return new CharSequenceReader(stdoutBuilder);
}
/**
 * Returns a fresh reader over the captured standard-error output.
 *
 * @return a {@link Reader} backed by {@code stderrBuilder}
 */
public final Reader getErrorOutput() {
    return new CharSequenceReader(stderrBuilder);
}
/**
 * Creates a buffered reader over the given text.
 *
 * @param text the character sequence to read from
 * @return a {@link BufferedReader} wrapping the text
 */
public static BufferedReader createBufferedReader(CharSequence text) {
    return new BufferedReader(new CharSequenceReader(text));
}
@Override public Reader render(Reader content, RenderContext context) throws IOException { TokenReplacer tokenReplacer = renderReplace(content, context); // The last step of this rendering chain is _always_ the markup parsing // There is no (good) reason why it isn't itself a RenderComponent, but this is more explicit // If we want to allow for post render steps this may be required, // but one question will be whether we want have a different interface to be explicit, or just rely on ordering? // We are now doing the markup last because we don't want all the escaping/antisamy mucking with our data // and this is the easiest way. It wasn't possible before when we escaped all HTML. CharSequence markup = transform.apply(tokenReplacer.getIntermediate()); return new CharSequenceReader(tokenReplacer.resolve(markup)); }
@Override public Reader render(Reader content, RenderContext context) throws IOException { TokenReplacer tokenReplacer = renderReplace(content, context); // The last step of this rendering chain is _always_ the markup parsing // There is no (good) reason why it isn't itself a RenderComponent, but this is more explicit // If we want to allow for post render steps this may be required, // but one question will be whether we want have a different interface to be explicit, or just rely on ordering? // We are now doing the markup last because we don't want all the escaping/antisamy mucking with our data // and this is the easiest way. It wasn't possible before when we escaped all HTML. CharSequence markup = transform.apply(tokenReplacer.getIntermediate()); return new CharSequenceReader(tokenReplacer.resolve(markup)); }
/**
 * Applies the given transform to a resource's content and writes the
 * transformed content to the supplied OutputStream.
 * Note that the OutputStream will not be closed by this method.
 *
 * @param originalResource - the resource to transform
 * @param encoding - the encoding to use for writing
 * @param out - the output stream
 * @param transform - a function for transforming the content
 * @throws DownloadException - thrown if it is not possible to stream the output
 * @since 2.9.0
 */
public static void transformAndStreamResource(final DownloadableResource originalResource,
        final Charset encoding, final OutputStream out,
        final Function<CharSequence, CharSequence> transform) throws DownloadException {
    try {
        final StringWriter writer = new StringWriter();
        // try-with-resources: close() (rather than only flush()) guarantees any
        // bytes still buffered inside the charset decoder are drained into the
        // writer before we transform the captured text. Closing the
        // WriterOutputStream only closes the in-memory StringWriter, not 'out'.
        try (WriterOutputStream output = new WriterOutputStream(writer, encoding)) {
            originalResource.streamResource(output);
        }
        IOUtils.copy(
                new CharSequenceReader(transform.apply(writer.getBuffer().toString())),
                out, encoding.name());
    } catch (final IOException e) {
        throw new DownloadException("Unable to stream to the output", e);
    }
}}
/** * Add terms to the query for the synonyms. * * @param dmq * {@link DisjunctionMaxQuery} * @param original * Original term to determine synonyms for. */ private void addSynonyms(DisjunctionMaxQuery dmq, CharSequence original) throws IOException { try (TokenStream synonymTokens = optSynonymAnalyzer.tokenStream("querqy", new CharSequenceReader(original))) { synonymTokens.reset(); CharTermAttribute generated = synonymTokens.addAttribute(CharTermAttribute.class); while (synonymTokens.incrementToken()) { // We need to copy "generated" per toString() here, because // "generated" is transient. dmq.addClause(new Term(dmq, generated.toString(), true)); } synonymTokens.end(); } }
/**
 * Builds the indexable text field for the document held by the indexer, or an
 * empty set if the indexer has no document.
 */
@Override
public Iterable<IndexableField> getFieldValues(XmlIndexer indexer) {
    XdmNode doc = indexer.getXdmNode();
    if (doc != null && doc.getUnderlyingNode() != null) {
        SaxonDocBuilder builder = indexer.getSaxonDocBuilder();
        String fieldName = getName();
        Analyzer analyzer = getAnalyzer();
        TokenStream textTokens = null;
        try {
            // The empty reader only serves to obtain the analyzer's (reusable)
            // token stream; the real text is supplied by XmlTextTokenStream.
            textTokens = analyzer.tokenStream(fieldName, new CharSequenceReader(""));
        } catch (IOException ignored) {
            // Best-effort: tokenStream over an empty in-memory reader should
            // not fail; if it does, XmlTextTokenStream gets a null wrapped
            // stream and proceeds without it.
        }
        XmlTextTokenStream tokens = new XmlTextTokenStream(fieldName, analyzer, textTokens, doc,
                builder.getOffsets(), indexer.getProcessor());
        tokens.configureElementVisibility(indexer);
        return new FieldValues(this, Collections.singleton(new TextField(fieldName, tokens)));
    }
    return Collections.emptySet();
}
/**
 * Builds the indexable attribute field for the document held by the indexer,
 * or an empty set if the indexer has no document.
 */
@Override
public Iterable<IndexableField> getFieldValues(XmlIndexer indexer) {
    XdmNode doc = indexer.getXdmNode();
    if (doc != null && doc.getUnderlyingNode() != null) {
        SaxonDocBuilder builder = indexer.getSaxonDocBuilder();
        Analyzer analyzer = getAnalyzer();
        TokenStream textTokens = null;
        try {
            // The empty reader only serves to obtain the analyzer's (reusable)
            // token stream; the real text is supplied by AttributeTokenStream.
            textTokens = analyzer.tokenStream(getName(), new CharSequenceReader(""));
        } catch (IOException ignored) {
            // Best-effort: tokenStream over an empty in-memory reader should
            // not fail; if it does, AttributeTokenStream gets a null wrapped
            // stream and proceeds without it.
        }
        AttributeTokenStream tokens = new AttributeTokenStream(getName(), analyzer, textTokens, doc,
                builder.getOffsets(), indexer.getProcessor());
        return new FieldValues(this, Collections.singleton(new TextField(getName(), tokens)));
    }
    return Collections.emptySet();
}
}
/**
 * Builds the indexable element field for the document held by the indexer,
 * or an empty set if the indexer has no document.
 */
@Override
public Iterable<IndexableField> getFieldValues(XmlIndexer indexer) {
    XdmNode doc = indexer.getXdmNode();
    if (doc != null && doc.getUnderlyingNode() != null) {
        SaxonDocBuilder builder = indexer.getSaxonDocBuilder();
        Analyzer analyzer = getAnalyzer();
        TokenStream textTokens = null;
        try {
            // The empty reader only serves to obtain the analyzer's (reusable)
            // token stream; the real text is supplied by ElementTokenStream.
            textTokens = analyzer.tokenStream(getName(), new CharSequenceReader(""));
        } catch (IOException ignored) {
            // Best-effort: tokenStream over an empty in-memory reader should
            // not fail; if it does, ElementTokenStream gets a null wrapped
            // stream and proceeds without it.
        }
        XmlTokenStreamBase tokens = new ElementTokenStream(getName(), analyzer, textTokens, doc,
                builder.getOffsets(), indexer.getProcessor());
        tokens.configureElementVisibility(indexer);
        return new FieldValues(this, Collections.singleton(new TextField(getName(), tokens)));
    }
    return Collections.emptySet();
}
}
/**
 * Highlights matches of the query within the given node and returns the
 * resulting document.
 *
 * @param query the query whose matches are highlighted
 * @param node the node to highlight within
 * @return the highlighted document built by this handler
 * @throws XMLStreamException if reading the node fails
 * @throws SaxonApiException on Saxon processing errors
 */
public XdmNode highlight (Query query, NodeInfo node) throws XMLStreamException, SaxonApiException {
    if (needsPositions(query)) {
        // A partial workaround for highlighting element text queries with phrases
        query = replaceFields (query, textFieldName);
    }
    scorer = new QueryScorer(query);
    // grab all the text at once so Lucene's lame-ass highlighter can figure out if there are any
    // phrases in it...
    // TODO: is this the Analyzer we're looking for??? OR ... reimplement using different HL Analyzer
    defaultAnalyzer = new DefaultAnalyzer();
    TokenStream textTokens = null;
    try {
        // NOTE(review): the empty reader appears to exist only to obtain the
        // analyzer's token stream object for XmlTextTokenStream below — confirm.
        textTokens = defaultAnalyzer.tokenStream("xml_text", new CharSequenceReader(""));
    } catch (IOException e) { }
    init(new XmlTextTokenStream("xml_text", defaultAnalyzer, textTokens, new XdmNode (node), null, processor));
    // Walk the source node, feeding events to this handler, which builds the
    // highlighted output document.
    XmlReader xmlReader = new XmlReader ();
    xmlReader.addHandler(this);
    xmlReader.read(node);
    // setBaseURI (URI.create(node.getBaseURI()));
    // Propagate the source's system id onto the result document when possible.
    if (getDocument().getUnderlyingNode() instanceof TinyDocumentImpl) {
        ((TinyDocumentImpl)getDocument().getUnderlyingNode()).setBaseURI(node.getSystemId());
    }
    return getDocument();
}