/**
 * Creates a DAG store backed by the supplied database, buffering pending
 * writes in an indexed {@link WriteBatchWithIndex} so they can be read back
 * before being committed.
 *
 * @param db supplier for the (possibly lazily opened) RocksDB instance
 */
public RocksdbDAGStore(Supplier<RocksDB> db) {
    this._dbSupplier = db;
    this.batchOptions = new DBOptions();
    // true = overwrite-key semantics: the batch index keeps only the most
    // recent update for each key.
    this.batch = new WriteBatchWithIndex(true);
}
/**
 * Similar to {@link RocksDB#get(byte[])} but will also
 * read writes from this batch.
 *
 * This function will query both this batch and the DB and then merge
 * the results using the DB's merge operator (if the batch contains any
 * merge requests).
 *
 * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
 * read from the DB but will NOT change which keys are read from the batch
 * (the keys in this batch do not yet belong to any snapshot and will be
 * fetched regardless).
 *
 * @param db The Rocks database
 * @param options The read options to use
 * @param key The key to read the value for
 *
 * @return a byte array storing the value associated with the input key if
 *     any. null if it does not find the specified key.
 *
 * @throws RocksDBException if the value for the key cannot be read
 */
public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,
    final byte[] key) throws RocksDBException {
  // Delegates to the native (JNI) implementation; key.length avoids an
  // extra array copy on the native side.
  return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
      options.nativeHandle_, key, key.length);
}
/**
 * Similar to {@link RocksDB#get(byte[])} but will only
 * read the key from this batch; the underlying database is never consulted.
 *
 * @param options The database options to use
 * @param key The key to read the value for
 *
 * @return a byte array storing the value associated with the input key if
 *     any. null if it does not find the specified key.
 *
 * @throws RocksDBException if the batch does not have enough data to resolve
 *     Merge operations, MergeInProgress status may be returned.
 */
public byte[] getFromBatch(final DBOptions options, final byte[] key)
    throws RocksDBException {
  // Pure JNI delegation; no Java-side caching or validation happens here.
  return getFromBatch(nativeHandle_, options.nativeHandle_, key, key.length);
}
/**
 * Resolves every requested node id, consulting the pending write batch first
 * and falling through to the open database when one exists.
 *
 * @param nodeIds ids to look up; each one must have a stored value
 * @return a map with exactly one entry per requested id
 * @throws RuntimeException wrapping any {@link RocksDBException}
 * @throws IllegalStateException if any id resolves to no value
 */
public Map<NodeId, DAGNode> getAll(Set<NodeId> nodeIds) {
    if (nodeIds.isEmpty()) {
        return ImmutableMap.of();
    }
    Map<NodeId, DAGNode> result = new HashMap<>();
    lock.readLock().lock();
    try {
        for (NodeId nodeId : nodeIds) {
            final byte[] rawKey = toKey(nodeId);
            // No database open yet: only the batch can hold the value.
            final byte[] rawValue = (_db == null)
                    ? batch.getFromBatch(batchOptions, rawKey)
                    : batch.getFromBatchAndDB(db(), readOptions, rawKey);
            Preconditions.checkState(rawValue != null);
            result.put(nodeId, decode(rawValue));
        }
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    } finally {
        lock.readLock().unlock();
    }
    return result;
}
/** Serializes {@code dag} and stages it in the pending write batch under {@code key}. */
private void putInternal(byte[] key, DAG dag) {
    final byte[] encoded = encode(dag);
    try {
        batch.put(key, encoded);
    } catch (RocksDBException cause) {
        throw new RuntimeException(cause);
    }
}
/**
 * <p>Private WriteBatchWithIndex constructor which is used to construct
 * WriteBatchWithIndex instances from C++ side. As the reference to this
 * object is also managed from C++ side the handle will be disowned.</p>
 *
 * @param nativeHandle address of native instance.
 */
WriteBatchWithIndex(final long nativeHandle) {
  super(nativeHandle);
  // C++ retains ownership; disowning prevents the Java finalizer/close from
  // freeing the native object a second time.
  disOwnNativeHandle();
}
/**
 * Creates an iterator over this batch's contents for the default column
 * family. Use {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
 * position at or after a key. Keys are visited in index_comparator order;
 * multiple updates to the same key appear as separate entries, ordered by
 * update time.
 *
 * @return An iterator for the Write Batch contents
 */
public WBWIRocksIterator newIterator() {
  final long iteratorHandle = iterator0(nativeHandle_);
  return new WBWIRocksIterator(this, iteratorHandle);
}
/**
 * Creates an iterator over this batch's contents, restricted to one column
 * family. Use {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
 * position at or after a key. Keys are visited in index_comparator order;
 * multiple updates to the same key appear as separate entries, ordered by
 * update time.
 *
 * @param columnFamilyHandle The column family to iterate over
 * @return An iterator for the Write Batch contents, restricted to the column
 *     family
 */
public WBWIRocksIterator newIterator(
    final ColumnFamilyHandle columnFamilyHandle) {
  final long iteratorHandle =
      iterator1(nativeHandle_, columnFamilyHandle.nativeHandle_);
  return new WBWIRocksIterator(this, iteratorHandle);
}
/**
 * Stores a single node mapping: encodes it, stages it in the batch, and
 * flushes — all under the write lock.
 *
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
public void put(NodeId nodeId, DAGNode node) {
    // Encode outside the lock is not needed here; key/value derivation is
    // cheap and done before acquiring it, matching the original ordering.
    final byte[] rawKey = toKey(nodeId);
    final byte[] rawValue = encode(node);
    lock.writeLock().lock();
    try {
        batch.put(rawKey, rawValue);
        flush();
    } catch (RocksDBException cause) {
        throw new RuntimeException(cause);
    } finally {
        lock.writeLock().unlock();
    }
}
/**
 * Fetch the underlying write batch that contains all pending changes to be
 * committed.
 *
 * Note: You should not write or delete anything from the batch directly and
 * should only use the functions in the {@link Transaction} class to
 * write to this transaction.
 *
 * @return The write batch
 */
public WriteBatchWithIndex getWriteBatch() {
  assert (isOwningHandle());
  // Wrap the native handle returned by the JNI overload of getWriteBatch.
  return new WriteBatchWithIndex(getWriteBatch(nativeHandle_));
}
/**
 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
 * read writes from this batch.
 *
 * This function will query both this batch and the DB and then merge
 * the results using the DB's merge operator (if the batch contains any
 * merge requests).
 *
 * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
 * read from the DB but will NOT change which keys are read from the batch
 * (the keys in this batch do not yet belong to any snapshot and will be
 * fetched regardless).
 *
 * @param db The Rocks database
 * @param columnFamilyHandle The column family to retrieve the value from
 * @param options The read options to use
 * @param key The key to read the value for
 *
 * @return a byte array storing the value associated with the input key if
 *     any. null if it does not find the specified key.
 *
 * @throws RocksDBException if the value for the key cannot be read
 */
public byte[] getFromBatchAndDB(final RocksDB db,
    final ColumnFamilyHandle columnFamilyHandle, final ReadOptions options,
    final byte[] key) throws RocksDBException {
  // JNI delegation; the column family handle is passed last on the native side.
  return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
      options.nativeHandle_, key, key.length,
      columnFamilyHandle.nativeHandle_);
}
/**
 * Stages every node mapping in the batch, reusing one encode buffer, then
 * flushes once — all under the write lock.
 *
 * @throws RuntimeException wrapping any {@link RocksDBException} from staging
 */
public void putAll(Map<NodeId, DAGNode> nodeMappings) {
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    lock.writeLock().lock();
    try {
        for (Map.Entry<NodeId, DAGNode> entry : nodeMappings.entrySet()) {
            buffer.reset(); // reuse the buffer between encodes
            encode(entry.getValue(), buffer);
            final byte[] rawValue = buffer.toByteArray();
            final byte[] rawKey = toKey(entry.getKey());
            try {
                batch.put(rawKey, rawValue);
            } catch (RocksDBException cause) {
                throw new RuntimeException(cause);
            }
        }
        flush();
    } finally {
        lock.writeLock().unlock();
    }
}
/**
 * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
 * read the key from this batch; the underlying database is never consulted.
 *
 * @param columnFamilyHandle The column family to retrieve the value from
 * @param options The database options to use
 * @param key The key to read the value for
 *
 * @return a byte array storing the value associated with the input key if
 *     any. null if it does not find the specified key.
 *
 * @throws RocksDBException if the batch does not have enough data to resolve
 *     Merge operations, MergeInProgress status may be returned.
 */
public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle,
    final DBOptions options, final byte[] key) throws RocksDBException {
  // Pure JNI delegation to the column-family-aware native overload.
  return getFromBatch(nativeHandle_, options.nativeHandle_, key, key.length,
      columnFamilyHandle.nativeHandle_);
}
/** * Atomically inserts the graph entries for all the provided commits. * * @param commits the commits to create the graph entries for (one entry for each commit/parent) * * @implNote uses a {@link WriteBatchWithIndex} to query uncommitted data through * {@link WriteBatchWithIndex#getFromBatchAndDB getFromBatchAndDB} */ public void putAll(Iterable<RevCommit> commits) { int count = 0; final Stopwatch sw = LOG.isTraceEnabled() ? Stopwatch.createStarted() : null; try (WriteBatchWithIndex batch = new WriteBatchWithIndex(); // RocksDBReference dbRef = dbhandle.getReference(); WriteOptions wo = new WriteOptions()) { wo.setSync(true); for (RevCommit c : commits) { ObjectId commitId = c.getId(); ImmutableList<ObjectId> parentIds = c.getParentIds(); put(dbRef, commitId, parentIds, batch); count++; } dbRef.db().write(wo, batch); } catch (Exception e) { throw Throwables.propagate(e); } if (LOG.isTraceEnabled()) { LOG.trace(String.format("Inserted %,d graph mappings in %s", count, sw.stop())); } }
/**
 * Loads the raw node bytes for {@code id} and decodes them, reading through
 * the given batch (uncommitted data) when one is provided, otherwise straight
 * from the database.
 *
 * @param failIfNotFound when {@code true}, a missing id raises
 *     {@link IllegalArgumentException}; otherwise {@code null} is returned
 * @return the decoded node, or {@code null} if absent and not failing
 */
private NodeData getNodeInternal(final RocksDBReference dbRef, final ObjectId id,
        final boolean failIfNotFound, @Nullable WriteBatchWithIndex batch) {
    Preconditions.checkNotNull(id, "id");
    final byte[] rawKey = id.getRawValue();
    byte[] rawData;
    try (ReadOptions ro = new ReadOptions()) {
        final RocksDB db = dbRef.db();
        rawData = (batch == null) ? db.get(rawKey)
                : batch.getFromBatchAndDB(db, ro, rawKey);
    } catch (RocksDBException e) {
        throw propagate(e);
    }
    if (rawData == null) {
        if (failIfNotFound) {
            throw new IllegalArgumentException("Graph Object does not exist: " + id);
        }
        return null;
    }
    return BINDING.entryToObject(rawData);
}
/**
 * Stages every DAG in the write batch and flushes once, under the write lock.
 *
 * NOTE(review): the relationship between {@code buff}, {@code out} and
 * {@code encode(d, out)} is not visible from here — presumably {@code encode}
 * writes through {@code out} into {@code buff} and returns the serialized
 * bytes. Verify that {@code buff.reset()} alone is enough to clear state
 * between iterations (i.e. that {@code out} keeps no internal buffer of its
 * own); if it does, values could accumulate bytes from earlier DAGs.
 */
public void save(Map<TreeId, DAG> dags) {
    lock.writeLock().lock();
    try {
        ByteArrayOutputStream buff = new ByteArrayOutputStream();
        ByteArrayDataOutput out = ByteStreams.newDataOutput(buff);
        for (DAG d : dags.values()) {
            buff.reset(); // reuse the backing buffer for each DAG
            byte[] key = toKey(d.getId());
            byte[] value = encode(d, out);
            batch.put(key, value);
        }
        flush();
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    } finally {
        lock.writeLock().unlock();
    }
}
/**
 * Loads a DAG by its pre-computed key, preferring the pending batch and
 * falling back to the database when one is open.
 *
 * @return the decoded DAG, or {@code null} when the key is absent everywhere
 */
@Nullable
private DAG getInternal(TreeId id, final byte[] key) {
    try {
        byte[] raw = batch.getFromBatch(batchOptions, key);
        if (raw == null && _db != null) {
            raw = _db.get(readOptions, key);
        }
        return raw == null ? null : decode(id, raw);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/** * Atomically inserts the graph entries for all the provided commits. * * @param commits the commits to create the graph entries for (one entry for each commit/parent) * * @implNote uses a {@link WriteBatchWithIndex} to query uncommitted data through * {@link WriteBatchWithIndex#getFromBatchAndDB getFromBatchAndDB} */ public void putAll(Iterable<RevCommit> commits) { int count = 0; final Stopwatch sw = LOG.isTraceEnabled() ? Stopwatch.createStarted() : null; try (WriteBatchWithIndex batch = new WriteBatchWithIndex(); // RocksDBReference dbRef = dbhandle.getReference(); WriteOptions wo = new WriteOptions()) { wo.setSync(true); for (RevCommit c : commits) { ObjectId commitId = c.getId(); ImmutableList<ObjectId> parentIds = c.getParentIds(); put(dbRef, commitId, parentIds, batch); count++; } dbRef.db().write(wo, batch); } catch (RocksDBException e) { throw new RuntimeException(e); } if (LOG.isTraceEnabled()) { LOG.trace(String.format("Inserted %,d graph mappings in %s", count, sw.stop())); } }