/**
 * Adds RDF data to the remote repository.
 *
 * @param add
 *            The RDF data to be added.
 *
 * @return The mutation count.
 */
public long add(final AddOp add) throws Exception {

    // Delegate with a freshly minted operation identifier.
    final UUID queryId = UUID.randomUUID();

    return add(add, queryId);

}
/**
 * Insert a resource into the {@link NanoSparqlServer}. This is used to
 * load resources in the test package into the server.
 *
 * @param method
 *            The HTTP method (ignored by this implementation).
 * @param resource
 *            The path of the resource to be loaded.
 *
 * @return The mutation count.
 */
protected long doInsertbyURL(final String method, final String resource)
        throws Exception {

    // Turn the resource path into a file: URI the server can dereference.
    final String uri = new File(resource).toURI().toString();

    return m_repo.add(new AddOp(uri));

}
/**
 * Load a file into the remote repository.
 *
 * @param file
 *            The name of the file to load.
 * @param format
 *            The RDF serialization format of the file.
 *
 * @throws Exception
 */
protected void loadFile(final String file, final RDFFormat format)
        throws Exception {

    m_repo.add(new AddOp(new File(file), format));

}
/**
 * Load a file into the remote repository.
 *
 * @param file
 *            The file to load.
 * @param format
 *            The interchange format of the file's contents.
 *
 * @throws Exception
 */
protected void loadFile(final String file, final RDFFormat format)
        throws Exception {

    final AddOp operation = new AddOp(new File(file), format);

    m_repo.add(operation);

}
/**
 * Generate and load the given number of synthetic statements into the
 * named namespace.
 */
private void loadStatements(final String namespace, final int nstatements)
        throws Exception {

    // Build the batch of generated triples.
    final Collection<Statement> stmts = new ArrayList<>(nstatements);

    for (int n = 0; n < nstatements; n++) {

        stmts.add(generateTriple());

    }

    log.warn(String.format("Loading package into %s namespace...", namespace));

    m_mgr.getRepositoryForNamespace(namespace).add(
            new RemoteRepository.AddOp(stmts));

    log.warn(String.format("Loading package into %s namespace done", namespace));

}
/**
 * Load N3 data from a classpath resource into the given namespace.
 *
 * @param repo
 *            The remote repository manager.
 * @param namespace
 *            The namespace into which the data will be loaded.
 * @param resource
 *            The classpath resource containing the N3 data.
 *
 * @throws IOException
 *             if the resource could not be located.
 */
public static void loadDataFromResource(final RemoteRepositoryManager repo,
        final String namespace, final String resource) throws Exception {

    // try-with-resources replaces the manual close() in a finally block;
    // close() is skipped automatically when the stream is null.
    try (final InputStream is = SampleBlazegraphCustomFunctionRemote.class
            .getResourceAsStream(resource)) {

        if (is == null) {
            throw new IOException("Could not locate resource: " + resource);
        }

        repo.getRepositoryForNamespace(namespace).add(
                new RemoteRepository.AddOp(is, RDFFormat.N3));

    }

}
/**
 * Load N3 data from a classpath resource into the given namespace.
 *
 * @param repo
 *            The remote repository manager.
 * @param namespace
 *            The namespace into which the data will be loaded.
 * @param resource
 *            The classpath resource containing the N3 data.
 *
 * @throws IOException
 *             if the resource could not be located.
 */
public static void loadDataFromResource(final RemoteRepositoryManager repo,
        final String namespace, final String resource) throws Exception {

    // try-with-resources replaces the manual close() in a finally block;
    // close() is skipped automatically when the stream is null.
    try (final InputStream is = SampleBlazegraphCustomFunctionRemote.class
            .getResourceAsStream(resource)) {

        if (is == null) {
            throw new IOException("Could not locate resource: " + resource);
        }

        repo.getRepositoryForNamespace(namespace).add(
                new RemoteRepository.AddOp(is, RDFFormat.N3));

    }

}
/**
 * Load N3 data from a classpath resource into the given namespace.
 *
 * @param repo
 *            The remote repository manager.
 * @param namespace
 *            The namespace into which the data will be loaded.
 * @param resource
 *            The classpath resource containing the N3 data.
 *
 * @throws IOException
 *             if the resource could not be located.
 */
private static void loadDataFromResource(final RemoteRepositoryManager repo,
        final String namespace, final String resource) throws Exception {

    // try-with-resources replaces the manual close() in a finally block;
    // close() is skipped automatically when the stream is null.
    try (final InputStream is = SampleBlazegraphSesameRemote.class
            .getResourceAsStream(resource)) {

        if (is == null) {
            throw new IOException("Could not locate resource: " + resource);
        }

        repo.getRepositoryForNamespace(namespace).add(
                new RemoteRepository.AddOp(is, RDFFormat.N3));

    }

}
@Override protected void doApplyToNamespace(final RemoteRepository repo, final UUID uuid) throws Exception { // Setup data. final RemoteRepository.AddOp op; { final Collection<Statement> stmts = new ArrayList<>(batchSize); for (int i = 0; i < batchSize; i++) { stmts.add(generateTriple()); } op = new RemoteRepository.AddOp(stmts); } // do mutation. repo.add(op, uuid); }
/**
 * FIXME We need to verify export for this case. It relies on access to a
 * Bigdata specific ValueFactoryImpl to handle the RDR mode statements.
 */
public void test_EXPORT_TURTLE_RDR() throws Exception {

    if (!BigdataStatics.runKnownBadTests) {
        return;
    }

    final long ntriples = 3L;

    // try-with-resources replaces the manual null-guarded close() in a
    // finally block.
    try (final InputStream is = new FileInputStream(new File(packagePath
            + "rdr_01.ttlx"))) {

        final AddOp add = new AddOp(is, ServiceProviderHook.TURTLE_RDR);

        assertEquals(ntriples, m_repo.add(add));

    }

    // Deliberate failure: the export half of this test is not written yet.
    fail("write export test for TURTLE-RDR");

}
/**
 * Apply the given mutation against the remote repository, restricted to
 * the supplied contexts, wrapping any failure as a
 * {@link RepositoryException}.
 */
private void add(final AddOp op, final Resource... c)
        throws RepositoryException {

    try {

        op.setContext(c);

        repo.getRemoteRepository().add(op);

    } catch (Exception ex) {

        // Preserve the original cause for the caller.
        throw new RepositoryException(ex);

    }

}
/**
 * Insert a resource into the {@link NanoSparqlServer}. This is used to
 * load resources in the test package into the server.
 *
 * @param method
 *            The HTTP method (ignored by this implementation).
 * @param resource
 *            The path of the resource to be loaded.
 *
 * @return The mutation count.
 */
protected long doInsertbyURL(final String method, final String resource)
        throws Exception {

    // The server loads the data by dereferencing this file: URI.
    final AddOp add = new AddOp(new File(resource).toURI().toString());

    return m_repo.add(add);

}
/**
 * Load a file into the remote repository.
 *
 * @param file
 *            The name of the file to load.
 * @param format
 *            The RDF serialization format of the file.
 *
 * @throws Exception
 */
protected void loadFile(final String file, final RDFFormat format)
        throws Exception {

    m_repo.add(new AddOp(new File(file), format));

}
/**
 * Load a file into the remote repository.
 *
 * @param file
 *            The file to load.
 * @param format
 *            The interchange format of the file's contents.
 *
 * @throws Exception
 */
protected void loadFile(final String file, final RDFFormat format)
        throws Exception {

    final AddOp operation = new AddOp(new File(file), format);

    m_repo.add(operation);

}
/**
 * Generate and load the given number of synthetic statements into the
 * named namespace.
 */
private void loadStatements(final String namespace, final int nstatements)
        throws Exception {

    // Build the batch of generated triples.
    final Collection<Statement> stmts = new ArrayList<>(nstatements);

    for (int n = 0; n < nstatements; n++) {

        stmts.add(generateTriple());

    }

    log.warn(String.format("Loading package into %s namespace...", namespace));

    m_mgr.getRepositoryForNamespace(namespace).add(
            new RemoteRepository.AddOp(stmts));

    log.warn(String.format("Loading package into %s namespace done", namespace));

}
/** * Reads a resource and sends it using an INSERT with BODY request to be * loaded into the database. * * @param method * @param servlet * @param resource * @return * @throws Exception */ protected long doInsertByBody(final String method, /*final String servlet,*/ final RDFFormat rdfFormat, final Graph g, final URI defaultContext) throws Exception { final byte[] wireData = writeOnBuffer(rdfFormat, g); // final RemoteRepository repo = new RemoteRepository(m_serviceURL); final AddOp add = new AddOp(wireData, rdfFormat); if (defaultContext != null) add.setContext(defaultContext); return m_repo.add(add); }
@Override protected void doApplyToNamespace(final RemoteRepository repo, final UUID uuid) throws Exception { // Setup data. final RemoteRepository.AddOp op; { final Collection<Statement> stmts = new ArrayList<>(batchSize); for (int i = 0; i < batchSize; i++) { stmts.add(generateTriple()); } op = new RemoteRepository.AddOp(stmts); } // do mutation. repo.add(op, uuid); }
/** * Test of POST w/ BODY having data to be loaded. */ protected void doInsertWithBodyTest(final String method, final int ntriples, /*final String servlet,*/ final RDFFormat format) throws Exception { final byte[] data = genNTRIPLES(ntriples, format); // final File file = File.createTempFile("bigdata-testnssclient", ".data"); /* * Only for testing. Clients should use AddOp(File, RDFFormat). */ final AddOp add = new AddOp(data, format); assertEquals(ntriples, m_repo.add(add)); // Verify the expected #of statements in the store. { final String queryStr = "select * where {?s ?p ?o}"; final IPreparedTupleQuery query = m_repo.prepareTupleQuery(queryStr); assertEquals(ntriples, countResults(query.evaluate())); } }
/** * Reads a resource and sends it using an INSERT with BODY request to be * loaded into the database. * * @param method * @param servlet * @param resource * @return * @throws Exception */ protected long doInsertByBody(final String method, /*final String servlet,*/ final RDFFormat rdfFormat, final Graph g, final URI defaultContext) throws Exception { final byte[] wireData = writeOnBuffer(rdfFormat, g); // final RemoteRepository repo = new RemoteRepository(m_serviceURL); final AddOp add = new AddOp(wireData, rdfFormat); if (defaultContext != null) add.setContext(defaultContext); return m_repo.add(add); }
/** * Test of POST w/ BODY having data to be loaded. */ protected void doInsertWithBodyTest(final String method, final int ntriples, /*final String servlet,*/ final RDFFormat format) throws Exception { final byte[] data = genNTRIPLES(ntriples, format); // final File file = File.createTempFile("bigdata-testnssclient", ".data"); /* * Only for testing. Clients should use AddOp(File, RDFFormat). */ final AddOp add = new AddOp(data, format); assertEquals(ntriples, m_repo.add(add)); // Verify the expected #of statements in the store. { final String queryStr = "select * where {?s ?p ?o}"; final IPreparedTupleQuery query = m_repo.prepareTupleQuery(queryStr); assertEquals(ntriples, countResults(query.evaluate())); } }