/** * Flushes all Mutations in the index writer. And all metric mutations to the metrics table. * Note that the metrics table is not updated until this method is explicitly called (or implicitly via close). */ public void flush() { try { // Flush index writer indexWriter.flush(); // Write out metrics mutations BatchWriter metricsWriter = connector.createBatchWriter(table.getMetricsTableName(), writerConfig); metricsWriter.addMutations(getMetricsMutations()); metricsWriter.close(); // Re-initialize the metrics metrics.clear(); metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0)); } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation was rejected by server on flush", e); } catch (TableNotFoundException e) { throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo table does not exist", e); } }
/**
 * Writes {@code opts.count} random mutations to the configured table and logs timing.
 * Exits nonzero (rethrows) on any failure after logging it.
 */
public static void main(String[] args) throws Exception {
    Opts opts = new Opts(table_name);
    opts.setPrincipal("root");
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);

    long start = System.currentTimeMillis();
    log.info("starting at {} for user {}", start, opts.getPrincipal());
    // Both the client and the batch writer are managed by try-with-resources, so the
    // writer is closed (flushing pending mutations) even when addMutations throws —
    // previously an exception there would have leaked the writer.
    try (AccumuloClient accumuloClient = opts.createClient();
        BatchWriter bw = accumuloClient.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig())) {
      log.info("Writing {} mutations...", opts.count);
      bw.addMutations(new RandomMutationGenerator(opts.count));
    } catch (Exception e) {
      log.error("{}", e.getMessage(), e);
      throw e;
    }
    long stop = System.currentTimeMillis();
    log.info("stopping at {}", stop);
    log.info("elapsed: {}", (((double) stop - (double) start) / 1000.0));
}
// Queue the mutations on the batch writer, then force them out immediately;
// flush() blocks until previously added mutations have been written.
bw.addMutations(mutations);
bw.flush();
} catch (MutationsRejectedException e) {
// Apply the copied mutations; on rejection, log the failure. Passing the
// exception as the last argument preserves the stack trace, which the
// original log call dropped entirely.
bw.addMutations(mutationsCopy);
} catch (MutationsRejectedException e) {
    log.error("Could not apply mutations to {}", tableName, e);
/**
 * Deletes one RyaStatement by writing its serialized delete mutations to each
 * of the three core index tables (SPO, PO, OSP).
 */
protected void deleteSingleRyaStatement(final RyaStatement stmt) throws IOException, MutationsRejectedException {
    final Map<TABLE_LAYOUT, Collection<Mutation>> deleteMutations = ryaTableMutationsFactory.serializeDelete(stmt);
    // Each layout goes to its own batch writer; the three writes are independent.
    bw_osp.addMutations(deleteMutations.get(TABLE_LAYOUT.OSP));
    bw_po.addMutations(deleteMutations.get(TABLE_LAYOUT.PO));
    bw_spo.addMutations(deleteMutations.get(TABLE_LAYOUT.SPO));
}
/**
 * Removes a single statement: serializes it into per-layout delete mutations and
 * hands each collection to the corresponding core-table writer.
 */
protected void deleteSingleRyaStatement(final RyaStatement stmt) throws IOException, MutationsRejectedException {
    final Map<TABLE_LAYOUT, Collection<Mutation>> serialized = ryaTableMutationsFactory.serializeDelete(stmt);
    final Collection<Mutation> spoDeletes = serialized.get(TABLE_LAYOUT.SPO);
    final Collection<Mutation> poDeletes = serialized.get(TABLE_LAYOUT.PO);
    final Collection<Mutation> ospDeletes = serialized.get(TABLE_LAYOUT.OSP);
    bw_spo.addMutations(spoDeletes);
    bw_po.addMutations(poDeletes);
    bw_osp.addMutations(ospDeletes);
}
private void checkVersion() throws RyaDAOException, IOException, MutationsRejectedException { final String version = getVersion(); if (version == null) { //adding to core Rya tables but not Indexes final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(getVersionRyaStatement()); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); } //TODO: Do a version check here }
private void checkVersion() throws RyaDAOException, IOException, MutationsRejectedException { final String version = getVersion(); if (version == null) { //adding to core Rya tables but not Indexes final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(getVersionRyaStatement()); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); } //TODO: Do a version check here }
@Override
public void run() {
    try {
        // Write this thread's slice of the precomputed mutations and block
        // until the batch writer has pushed them out.
        bw.addMutations(allMuts.get(idx));
        bw.flush();
    } catch (MutationsRejectedException e) {
        // NOTE(review): the cause is swallowed here — the test fails with only
        // a generic message and no stack trace.
        Assert.fail("Error adding mutations to batch writer");
    }
}
});
protected void commit(final Iterator<RyaStatement> commitStatements) throws RyaDAOException { try { //TODO: Should have a lock here in case we are adding and committing at the same time while (commitStatements.hasNext()) { final RyaStatement stmt = commitStatements.next(); final Map<TABLE_LAYOUT, Collection<Mutation>> mutationMap = ryaTableMutationsFactory.serialize(stmt); final Collection<Mutation> spo = mutationMap.get(TABLE_LAYOUT.SPO); final Collection<Mutation> po = mutationMap.get(TABLE_LAYOUT.PO); final Collection<Mutation> osp = mutationMap.get(TABLE_LAYOUT.OSP); bw_spo.addMutations(spo); bw_po.addMutations(po); bw_osp.addMutations(osp); for (final AccumuloIndexer index : secondaryIndexers) { index.storeStatement(stmt); } } if (flushEachUpdate.get()) { mt_bw.flush(); } } catch (final Exception e) { throw new RyaDAOException(e); } }
@Override public void run() { ArrayList<AccumuloWriter.Mutations> exports = new ArrayList<>(); while (true) { try { exports.clear(); // gather export from all threads that have placed an item on the queue exports.add(exportQueue.take()); exportQueue.drainTo(exports); for (AccumuloWriter.Mutations ml : exports) { bw.addMutations(ml.mutations); } bw.flush(); // notify all threads waiting after flushing for (AccumuloWriter.Mutations ml : exports) { ml.cdl.countDown(); } } catch (InterruptedException | MutationsRejectedException e) { throw new RuntimeException(e); } } }
@Override public void run() { ArrayList<Mutations> exports = new ArrayList<>(); while (true) { try { exports.clear(); // gather export from all threads that have placed an item on the queue exports.add(exportQueue.take()); exportQueue.drainTo(exports); for (Mutations ml : exports) { bw.addMutations(ml.mutations); } bw.flush(); // notify all threads waiting after flushing for (Mutations ml : exports) { ml.cdl.countDown(); } } catch (InterruptedException | MutationsRejectedException e) { throw new RuntimeException(e); } } }
/**
 * Applies the mutations produced by {@code mut} to {@code writer}, translating a
 * rejected-mutations failure into an unchecked {@code AccumuloGraphException}.
 */
public static void apply(BatchWriter writer, Mutator mut) {
    try {
        writer.addMutations(mut.create());
    } catch (MutationsRejectedException rejected) {
        throw new AccumuloGraphException(rejected);
    }
}
/**
 * Runs the given Mutator and queues whatever mutations it creates on the writer.
 * A server-side rejection surfaces as an unchecked {@code AccumuloGraphException}
 * so callers do not need to handle the checked Accumulo exception.
 */
public static void apply(BatchWriter writer, Mutator mut) {
    try {
        writer.addMutations(mut.create());
    } catch (MutationsRejectedException mre) {
        throw new AccumuloGraphException(mre);
    }
}
/**
 * Writes every queued batch, flushes once for the whole group, then releases each
 * batch's waiters and wakes any threads blocked on async-batch completion.
 */
private void processBatches(ArrayList<MutationBatch> batches) throws MutationsRejectedException {
    for (MutationBatch mutationBatch : batches) {
        // `end` is compared by identity — presumably a shared shutdown marker
        // carrying no mutations; TODO confirm against the field's declaration.
        if (mutationBatch != end) {
            bw.addMutations(mutationBatch.mutations);
        }
    }
    // A single flush covers every batch added above.
    bw.flush();
    int numAsync = 0;
    for (MutationBatch mutationBatch : batches) {
        // Release whoever is waiting on this batch, and tally async batches.
        mutationBatch.countDown();
        if (mutationBatch.isAsync) {
            numAsync++;
        }
    }
    if (numAsync > 0) {
        // Publish the async-progress count and wake threads waiting on this writer.
        synchronized (SharedBatchWriter.this) {
            asyncBatchesProcessed += numAsync;
            SharedBatchWriter.this.notifyAll();
        }
    }
}
}
/**
 * Flushes a group of batches in one round trip: all mutations are queued first,
 * flushed together, and only then are the per-batch latches counted down.
 */
private void processBatches(ArrayList<MutationBatch> batches) throws MutationsRejectedException {
    for (MutationBatch mutationBatch : batches) {
        // Identity comparison against `end` — NOTE(review): looks like an
        // end-of-stream sentinel with no payload; confirm at the declaration site.
        if (mutationBatch != end) {
            bw.addMutations(mutationBatch.mutations);
        }
    }
    bw.flush();
    int numAsync = 0;
    for (MutationBatch mutationBatch : batches) {
        // Latches are only released after the flush so waiters see durable writes.
        mutationBatch.countDown();
        if (mutationBatch.isAsync) {
            numAsync++;
        }
    }
    if (numAsync > 0) {
        // Update the shared async counter under the writer's monitor and notify.
        synchronized (SharedBatchWriter.this) {
            asyncBatchesProcessed += numAsync;
            SharedBatchWriter.this.notifyAll();
        }
    }
}
}
/**
 * Writes {@code opts.count} random mutations to the configured table and logs timing.
 * Any failure is logged with its stack trace and rethrown.
 */
public static void main(String[] args) throws Exception {
    Opts opts = new Opts(table_name);
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);
    long start = System.currentTimeMillis();
    log.info("starting at " + start + " for user " + opts.principal);
    try {
        Connector connector = opts.getConnector();
        BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
        log.info("Writing " + opts.count + " mutations...");
        bw.addMutations(new RandomMutationGenerator(opts.count));
        bw.close();
    } catch (Exception e) {
        // log.error(e) recorded only the throwable's toString; pass the message
        // plus the throwable so the full stack trace is logged before rethrowing.
        log.error(e.getMessage(), e);
        throw e;
    }
    long stop = System.currentTimeMillis();
    log.info("stopping at " + stop);
    log.info("elapsed: " + (((double) stop - (double) start) / 1000.0));
}
/**
 * Writes {@code opts.count} random mutations to the configured table and logs timing.
 * Any failure is logged (message plus stack trace) and rethrown.
 */
public static void main(String[] args) throws Exception {
    Opts opts = new Opts(table_name);
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);
    long start = System.currentTimeMillis();
    // Parameterized logging (consistent with the error call below) avoids building
    // the message string when the level is disabled.
    log.info("starting at {} for user {}", start, opts.getPrincipal());
    try {
        Connector connector = opts.getConnector();
        BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
        log.info("Writing {} mutations...", opts.count);
        bw.addMutations(new RandomMutationGenerator(opts.count));
        bw.close();
    } catch (Exception e) {
        log.error("{}", e.getMessage(), e);
        throw e;
    }
    long stop = System.currentTimeMillis();
    log.info("stopping at {}", stop);
    log.info("elapsed: {}", (((double) stop - (double) start) / 1000.0));
}
private void writeMetadata(final MergeParentMetadata metadata) throws MergerException { BatchWriter writer = null; try{ // Write each result. final List<Mutation> mutations = makeWriteMetadataMutations(metadata); writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig()); writer.addMutations(mutations); } catch (final AccumuloException | TableNotFoundException e) { throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e); } finally { if (writer != null) { try { writer.close(); } catch (final MutationsRejectedException e) { throw new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e); } } } } }
private void writeMetadata(final MergeParentMetadata metadata) throws MergerException { BatchWriter writer = null; try{ // Write each result. final List<Mutation> mutations = makeWriteMetadataMutations(metadata); writer = connector.createBatchWriter(mergeParentMetadataTableName, new BatchWriterConfig()); writer.addMutations(mutations); } catch (final AccumuloException | TableNotFoundException e) { throw new MergerException("Unable to set MergeParentMetadata in Accumulo", e); } finally { if (writer != null) { try { writer.close(); } catch (final MutationsRejectedException e) { throw new MergerException("Could not add results to a MergeParentMetadata table because some of the mutations were rejected.", e); } } } } }