/**
 * Serializes a {@link RyaStatement} into insert {@link Mutation}s for each of the
 * three core table layouts (SPO, PO, OSP).
 *
 * TODO: If there are contexts, do we still replicate the information into the
 * default graph as well as the named graphs?
 *
 * @param stmt the statement to serialize
 * @return a map from table layout to the mutations to apply for that layout
 * @throws IOException if the statement cannot be resolved into triple rows
 */
public Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> serialize( RyaStatement stmt) throws IOException {
    final Collection<Mutation> spoMutations = new ArrayList<Mutation>();
    final Collection<Mutation> poMutations = new ArrayList<Mutation>();
    final Collection<Mutation> ospMutations = new ArrayList<Mutation>();
    try {
        // One TripleRow per layout; turn each into an insert mutation.
        final Map<TABLE_LAYOUT, TripleRow> rows = ryaContext.serializeTriple(stmt);
        spoMutations.add(createMutation(rows.get(TABLE_LAYOUT.SPO)));
        poMutations.add(createMutation(rows.get(TABLE_LAYOUT.PO)));
        ospMutations.add(createMutation(rows.get(TABLE_LAYOUT.OSP)));
    } catch (final TripleRowResolverException cause) {
        // Surface resolver failures through the declared IOException, keeping the cause.
        throw new IOException(cause);
    }
    final Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> mutations =
            new HashMap<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>>();
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.SPO, spoMutations);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.PO, poMutations);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.OSP, ospMutations);
    return mutations;
}
/**
 * Serializes a {@link RyaStatement} into delete {@link Mutation}s for each of the
 * three core table layouts (SPO, PO, OSP).
 *
 * TODO: If there are contexts, do we still replicate the information into the
 * default graph as well as the named graphs?
 *
 * @param stmt the statement whose rows should be deleted
 * @return a map from table layout to the delete mutations for that layout
 * @throws IOException if the statement cannot be resolved into triple rows
 */
public Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> serializeDelete( RyaStatement stmt) throws IOException {
    final Collection<Mutation> spoDeletes = new ArrayList<Mutation>();
    final Collection<Mutation> poDeletes = new ArrayList<Mutation>();
    final Collection<Mutation> ospDeletes = new ArrayList<Mutation>();
    try {
        // One TripleRow per layout; turn each into a delete mutation.
        final Map<TABLE_LAYOUT, TripleRow> rows = ryaContext.serializeTriple(stmt);
        spoDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.SPO)));
        poDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.PO)));
        ospDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.OSP)));
    } catch (final TripleRowResolverException cause) {
        // Surface resolver failures through the declared IOException, keeping the cause.
        throw new IOException(cause);
    }
    final Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> mutations =
            new HashMap<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>>();
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.SPO, spoDeletes);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.PO, poDeletes);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.OSP, ospDeletes);
    return mutations;
}
/**
 * Deletes one {@link RyaStatement} by writing delete mutations to each of the
 * three core-table batch writers (SPO, PO, OSP).
 *
 * @param stmt the statement to delete
 * @throws IOException if the statement cannot be serialized
 * @throws MutationsRejectedException if Accumulo rejects any of the mutations
 */
protected void deleteSingleRyaStatement(final RyaStatement stmt) throws IOException, MutationsRejectedException {
    final Map<TABLE_LAYOUT, Collection<Mutation>> deletes = ryaTableMutationsFactory.serializeDelete(stmt);
    final Collection<Mutation> spoDeletes = deletes.get(TABLE_LAYOUT.SPO);
    final Collection<Mutation> poDeletes = deletes.get(TABLE_LAYOUT.PO);
    final Collection<Mutation> ospDeletes = deletes.get(TABLE_LAYOUT.OSP);
    bw_spo.addMutations(spoDeletes);
    bw_po.addMutations(poDeletes);
    bw_osp.addMutations(ospDeletes);
}
private void checkVersion() throws RyaDAOException, IOException, MutationsRejectedException { final String version = getVersion(); if (version == null) { //adding to core Rya tables but not Indexes final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(getVersionRyaStatement()); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); } //TODO: Do a version check here }
// Build the mutation factory from the parent instance's triple context so serialized
// rows match the parent Rya instance's encoding. NOTE(review): this is a fragment of a
// larger setup method not fully visible here — confirm surrounding initialization order.
parentRyaContext = RyaTripleContext.getInstance(parentAccumuloRdfConfiguration); ryaTableMutationFactory = new RyaTableMutationsFactory(parentRyaContext);
private void checkVersion() throws RyaDAOException, IOException, MutationsRejectedException { final String version = getVersion(); if (version == null) { //adding to core Rya tables but not Indexes final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(getVersionRyaStatement()); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); } //TODO: Do a version check here }
// Create the factory that turns RyaStatements into per-layout Accumulo mutations,
// bound to this DAO's triple context. NOTE(review): fragment of a larger
// initialization method not fully visible here.
ryaTableMutationsFactory = new RyaTableMutationsFactory(ryaContext);
protected void commit(final Iterator<RyaStatement> commitStatements) throws RyaDAOException { try { //TODO: Should have a lock here in case we are adding and committing at the same time while (commitStatements.hasNext()) { final RyaStatement stmt = commitStatements.next(); final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(stmt); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); for (final AccumuloIndexer index : secondaryIndexers) { index.storeStatement(stmt); } } if (flushEachUpdate.get()) { mt_bw.flush(); } } catch (final Exception e) { throw new RyaDAOException(e); } }
// Create the factory that turns RyaStatements into per-layout Accumulo mutations,
// bound to this DAO's triple context. NOTE(review): fragment of a larger
// initialization method not fully visible here.
ryaTableMutationsFactory = new RyaTableMutationsFactory(ryaContext);
/**
 * Deletes one {@link RyaStatement} by writing delete mutations to each of the
 * three core-table batch writers (SPO, PO, OSP).
 *
 * @param stmt the statement to delete
 * @throws IOException if the statement cannot be serialized
 * @throws MutationsRejectedException if Accumulo rejects any of the mutations
 */
protected void deleteSingleRyaStatement(final RyaStatement stmt) throws IOException, MutationsRejectedException {
    final Map<TABLE_LAYOUT, Collection<Mutation>> deletes = ryaTableMutationsFactory.serializeDelete(stmt);
    final Collection<Mutation> spoDeletes = deletes.get(TABLE_LAYOUT.SPO);
    final Collection<Mutation> poDeletes = deletes.get(TABLE_LAYOUT.PO);
    final Collection<Mutation> ospDeletes = deletes.get(TABLE_LAYOUT.OSP);
    bw_spo.addMutations(spoDeletes);
    bw_po.addMutations(poDeletes);
    bw_osp.addMutations(ospDeletes);
}
/**
 * Serializes a {@link RyaStatement} into delete {@link Mutation}s for each of the
 * three core table layouts (SPO, PO, OSP).
 *
 * TODO: If there are contexts, do we still replicate the information into the
 * default graph as well as the named graphs?
 *
 * @param stmt the statement whose rows should be deleted
 * @return a map from table layout to the delete mutations for that layout
 * @throws IOException if the statement cannot be resolved into triple rows
 */
public Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> serializeDelete( RyaStatement stmt) throws IOException {
    final Collection<Mutation> spoDeletes = new ArrayList<Mutation>();
    final Collection<Mutation> poDeletes = new ArrayList<Mutation>();
    final Collection<Mutation> ospDeletes = new ArrayList<Mutation>();
    try {
        // One TripleRow per layout; turn each into a delete mutation.
        final Map<TABLE_LAYOUT, TripleRow> rows = ryaContext.serializeTriple(stmt);
        spoDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.SPO)));
        poDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.PO)));
        ospDeletes.add(deleteMutation(rows.get(TABLE_LAYOUT.OSP)));
    } catch (final TripleRowResolverException cause) {
        // Surface resolver failures through the declared IOException, keeping the cause.
        throw new IOException(cause);
    }
    final Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> mutations =
            new HashMap<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>>();
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.SPO, spoDeletes);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.PO, poDeletes);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.OSP, ospDeletes);
    return mutations;
}
/**
 * Serializes a {@link RyaStatement} into insert {@link Mutation}s for each of the
 * three core table layouts (SPO, PO, OSP).
 *
 * TODO: If there are contexts, do we still replicate the information into the
 * default graph as well as the named graphs?
 *
 * @param stmt the statement to serialize
 * @return a map from table layout to the mutations to apply for that layout
 * @throws IOException if the statement cannot be resolved into triple rows
 */
public Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> serialize( RyaStatement stmt) throws IOException {
    final Collection<Mutation> spoMutations = new ArrayList<Mutation>();
    final Collection<Mutation> poMutations = new ArrayList<Mutation>();
    final Collection<Mutation> ospMutations = new ArrayList<Mutation>();
    try {
        // One TripleRow per layout; turn each into an insert mutation.
        final Map<TABLE_LAYOUT, TripleRow> rows = ryaContext.serializeTriple(stmt);
        spoMutations.add(createMutation(rows.get(TABLE_LAYOUT.SPO)));
        poMutations.add(createMutation(rows.get(TABLE_LAYOUT.PO)));
        ospMutations.add(createMutation(rows.get(TABLE_LAYOUT.OSP)));
    } catch (final TripleRowResolverException cause) {
        // Surface resolver failures through the declared IOException, keeping the cause.
        throw new IOException(cause);
    }
    final Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> mutations =
            new HashMap<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>>();
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.SPO, spoMutations);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.PO, poMutations);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.OSP, ospMutations);
    return mutations;
}
protected void commit(final Iterator<RyaStatement> commitStatements) throws RyaDAOException { try { //TODO: Should have a lock here in case we are adding and committing at the same time while (commitStatements.hasNext()) { final RyaStatement stmt = commitStatements.next(); final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(stmt); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); for (final AccumuloIndexer index : secondaryIndexers) { index.storeStatement(stmt); } } if (flushEachUpdate.get()) { mt_bw.flush(); } } catch (final Exception e) { throw new RyaDAOException(e); } }
/**
 * Writes a {@link RyaStatement} (as inserts or deletes) to the SPO, PO, and OSP
 * tables via the MapReduce context.
 *
 * @param ryaStatement the statement to write; given the empty column visibility
 *        if it carries none
 * @param context the Hadoop task context used to emit the mutations
 * @param isDelete {@code true} to emit delete mutations, {@code false} for inserts
 * @throws IOException if serialization or writing fails
 * @throws InterruptedException if the context write is interrupted
 */
private void writeRyaMutations(final RyaStatement ryaStatement, final Context context, final boolean isDelete) throws IOException, InterruptedException {
    // Default to the empty column visibility when the statement carries none.
    if (ryaStatement.getColumnVisibility() == null) {
        ryaStatement.setColumnVisibility(AccumuloRdfConstants.EMPTY_CV.getExpression());
    }
    final Map<TABLE_LAYOUT, Collection<Mutation>> serialized = ryaTableMutationFactory.serialize(ryaStatement);
    // Emit each layout's mutations to its table, in SPO, PO, OSP order.
    for (final Mutation mutation : serialized.get(TABLE_LAYOUT.SPO)) {
        writeMutation(spoTable, mutation, context, isDelete);
    }
    for (final Mutation mutation : serialized.get(TABLE_LAYOUT.PO)) {
        writeMutation(poTable, mutation, context, isDelete);
    }
    for (final Mutation mutation : serialized.get(TABLE_LAYOUT.OSP)) {
        writeMutation(ospTable, mutation, context, isDelete);
    }
}