/**
 * Write a DatasetGraph to an output stream in a streaming (constant-memory) fashion.
 *
 * @param output OutputStream to write to
 * @param datasetGraph DatasetGraph to write
 * @param format Output syntax
 */
public static void write(OutputStream output, DatasetGraph datasetGraph, RDFFormat format) {
    StreamRDF dest = getWriterStream(output, format) ;
    StreamOps.datasetToStream(datasetGraph, dest) ;
}
/**
 * Get a StreamRDF destination that writes the given {@code Lang} in a scaling,
 * streaming fashion. The registry picks the concrete {@link RDFFormat} for the Lang.
 *
 * @param output OutputStream to write to
 * @param lang The syntax
 * @return StreamRDF, or null if the Lang has no streaming format.
 * @see StreamOps#graphToStream
 * @see StreamOps#datasetToStream
 */
public static StreamRDF getWriterStream(OutputStream output, Lang lang) {
    // Delegate to the RDFFormat-based overload via the registry's chosen format.
    return getWriterStream(output, registry.choose(lang)) ;
}
/**
 * Write a Graph to an output stream in a streaming (constant-memory) fashion.
 *
 * @param output OutputStream to write to
 * @param graph Graph to write
 * @param format Output syntax
 */
public static void write(OutputStream output, Graph graph, RDFFormat format) {
    // Parameter renamed from "lang" to "format": it is an RDFFormat, not a Lang,
    // matching the DatasetGraph overload. Call-compatible with existing callers.
    StreamRDF stream = getWriterStream(output, format) ;
    StreamOps.graphToStream(graph, stream) ;
}
/**
 * Create a streaming output sink if possible.
 *
 * @return {@code StreamRDFLib.sinkNull()} when output is discarded, a streaming
 *         StreamRDF writer when a streaming output format was requested, or
 *         {@code null} when no streaming format is configured (caller falls back
 *         to non-streaming output).
 */
protected StreamRDF createStreamSink() {
    if ( modLangParse.toBitBucket() )
        return StreamRDFLib.sinkNull() ;
    RDFFormat fmt = modLangOutput.getOutputStreamFormat() ;
    if ( fmt == null )
        return null ;
    // Streaming writer for the chosen format. (Was a "/** */" Javadoc block inside
    // the method body whose text described an accumulating stream; the code obtains
    // a streaming writer, so use an accurate line comment instead.)
    return StreamRDFWriter.getWriterStream(outputWrite, fmt) ;
}
/**
 * Create a streaming output sink if possible.
 *
 * @return {@code StreamRDFLib.sinkNull()} when output is discarded, a streaming
 *         StreamRDF writer when a streaming output format was requested, or
 *         {@code null} when no streaming format is configured (caller falls back
 *         to non-streaming output).
 */
protected StreamRDF createStreamSink() {
    if ( modLangParse.toBitBucket() )
        return StreamRDFLib.sinkNull() ;
    RDFFormat fmt = modLangOutput.getOutputStreamFormat() ;
    if ( fmt == null )
        return null ;
    // Streaming writer for the chosen format. (Was a "/** */" Javadoc block inside
    // the method body whose text described an accumulating stream; the code obtains
    // a streaming writer, so use an accurate line comment instead.)
    return StreamRDFWriter.getWriterStream(outputWrite, fmt) ;
}
/**
 * Exports each incoming GDM record as RDF to {@code outputStream} (via a single
 * shared streaming writer) while emitting the records' compact-JSON nodes to the
 * returned Observable.
 *
 * Side effect: RDF serialization is written to {@code outputStream} as the
 * returned Observable is consumed; {@code writer.finish()} runs on completion.
 * NOTE(review): the writer is not finished on error — presumably acceptable
 * because the stream is abandoned then; confirm.
 *
 * @param recordGDM source of GDM records
 * @param outputStream destination for the RDF serialization
 * @return Observable of the records' compact-JSON element nodes
 * @throws XMLStreamException declared by the interface; not thrown directly here
 */
@Override public Observable<JsonNode> generate(final Observable<GDMModel> recordGDM, final OutputStream outputStream) throws XMLStreamException {

    // One streaming RDF writer shared across all records; started eagerly,
    // finished via doOnCompleted below.
    final StreamRDF writer = StreamRDFWriter.getWriterStream(outputStream, rdfSerializationFormat);
    writer.start();

    // Caches shared across records so repeated resources/predicates reuse the
    // same Jena nodes; ConcurrentHashMap because records may be processed on
    // different Rx threads — TODO confirm the actual scheduler.
    final ConcurrentHashMap<String, org.apache.jena.graph.Node> resourceNodeCache = new ConcurrentHashMap<>();
    final ConcurrentHashMap<String, org.apache.jena.graph.Node> predicateCache = new ConcurrentHashMap<>();

    return recordGDM
            .doOnSubscribe(() -> LOG.debug("subscribed to RDF export; will return data as '{}'", mediaType.toString()))
            // Absorb bursts from the producer; drops nothing up to 10000 buffered records.
            .onBackpressureBuffer(10000)
            // Writes the record's RDF to `writer` as a side effect, returns the model.
            .map(recordGDMModel -> processRecordGDMModel(writer, resourceNodeCache, predicateCache, recordGDMModel))
            .map(org.dswarm.persistence.model.internal.Model::toGDMCompactJSON)
            // Flatten each JSON container into its element nodes.
            .flatMapIterable(nodes -> {

                final ArrayList<JsonNode> nodeList = new ArrayList<>();

                Iterators.addAll(nodeList, nodes.elements());

                return nodeList;
            })
            .doOnCompleted(writer::finish)
            .doOnCompleted(() -> LOG.debug("finished RDF export; return data as '{}'", mediaType.toString()));
}
/**
 * Serialize an RdfStream as N-Triples (or another line-oriented format) to the
 * given output stream, using a synchronized wrapper around the streaming writer.
 */
private static void serializeNTriples(final RdfStream rdfStream, final RDFFormat format,
        final OutputStream output) {
    // Wrap the writer so concurrent triple emission is safe.
    final StreamRDF sink = new SynchonizedStreamRDFWrapper(getWriterStream(output, format));

    sink.start();
    rdfStream.forEach(sink::triple);
    sink.finish();
}
/**
 * Serialize an RdfStream to {@code output} in a block-oriented streaming format,
 * emitting only those namespace prefixes from {@code nsPrefixes} whose namespace
 * actually occurs in the stream.
 *
 * The triple stream is fully materialized into a list first, because prefixes
 * must be written to the StreamRDF before any triples for the writer to honor them.
 */
private static void serializeBlockStreamed(final RdfStream rdfStream, final OutputStream output, final RDFFormat format, final Map<String, String> nsPrefixes) {

    // Namespaces observed while draining the stream.
    final Set<String> namespacesPresent = new HashSet<>();

    // Synchronized wrapper: triples may be emitted from multiple threads —
    // NOTE(review): presumably matches the other serialize* methods; confirm.
    final StreamRDF stream = new SynchonizedStreamRDFWrapper(getWriterStream(output, format));
    stream.start();

    // Must read the rdf stream before writing out ns prefixes, otherwise the prefixes come after the triples
    final List<Triple> tripleList = rdfStream.peek(t -> {
        // Collect the namespaces present in the RDF stream, using the same
        // criteria for where to look that jena's model.listNameSpaces() does
        namespacesPresent.add(t.getPredicate().getNameSpace());
        if (RDF_TYPE.equals(t.getPredicate().getURI()) && t.getObject().isURI()) {
            namespacesPresent.add(t.getObject().getNameSpace());
        }
    }).collect(Collectors.toList());

    nsPrefixes.forEach((prefix, uri) -> {
        // Only add namespace prefixes if the namespace is present in the rdf stream
        if (namespacesPresent.contains(uri)) {
            stream.prefix(prefix, uri);
        }
    });

    // Now that prefixes are registered, replay the buffered triples.
    tripleList.forEach(stream::triple);
    stream.finish();
}
// Streaming writer for the requested format; prefixes must be registered before
// any triples are emitted, hence they are written immediately after start().
final StreamRDF stream = getWriterStream(output, format);
stream.start();
// NOTE(review): nsService appears to be an optional namespace registry whose
// entries are (prefix -> uri) pairs — confirm against its declaration.
ofNullable(nsService).ifPresent(svc -> svc.getNamespaces().forEach(stream::prefix));
// Streaming writer for the requested format; prefixes must be registered before
// any triples are emitted, hence they are written immediately after start().
final StreamRDF stream = getWriterStream(output, format);
stream.start();
// NOTE(review): nsService appears to be an optional namespace registry whose
// entries are (prefix -> uri) pairs — confirm against its declaration.
ofNullable(nsService).ifPresent(svc -> svc.getNamespaces().forEach(stream::prefix));
private void streamResults(Iterator<Triple> results) { StreamRDF writer = StreamRDFWriter.getWriterStream(System.out, Lang.NTRIPLES); Cache<Triple,Boolean> seenTriples = CacheBuilder.newBuilder() .maximumSize(DUP_WINDOW).build(); writer.start(); while (results.hasNext()) { Triple triple = results.next(); if (seenTriples.getIfPresent(triple) != null) { // the triple has already been emitted continue; } seenTriples.put(triple, true); writer.triple(triple); } writer.finish(); }
// Streaming N-Quads writer over output1.
// NOTE(review): the StreamRDF local is named "output" while the underlying
// OutputStream is "output1" and startFixed takes "outStream" — three similarly
// named handles; consider renaming for clarity. (try-block continues beyond
// this excerpt.)
StreamRDF output = StreamRDFWriter.getWriterStream(output1, Lang.NQUADS) ;
try {
    startFixed(outStream) ;
// Streaming N-Quads writer over output1.
// NOTE(review): the StreamRDF local is named "output" while the underlying
// OutputStream is "output1" and startFixed takes "outStream" — three similarly
// named handles; consider renaming for clarity. (try-block continues beyond
// this excerpt.)
StreamRDF output = StreamRDFWriter.getWriterStream(output1, Lang.NQUADS) ;
try {
    startFixed(outStream) ;