/**
 * Returns an iterator over all blocks in the block-meta column.
 *
 * <p>Keys are 8-byte block ids, values are serialized {@code BlockMeta}.
 * FIX: the native {@code RocksIterator} and {@code ReadOptions} were
 * previously never closed; they are now released as soon as the scan is
 * exhausted. A caller that abandons the iterator mid-scan still leaks
 * them until GC — prefer draining the iterator fully.
 */
@Override
public Iterator<Block> iterator() {
  // prefixSameAsStart keeps the scan within the column's key prefix when a
  // prefix extractor is configured -- TODO confirm one is set on this column.
  ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
  RocksIterator iter = mDb.newIterator(mBlockMetaColumn, readOptions);
  iter.seekToFirst();
  return new Iterator<Block>() {
    // Guards against double-closing the native handles.
    private boolean mClosed = false;

    @Override
    public boolean hasNext() {
      if (iter.isValid()) {
        return true;
      }
      // Scan exhausted: release native memory eagerly instead of leaking it.
      if (!mClosed) {
        mClosed = true;
        iter.close();
        readOptions.close();
      }
      return false;
    }

    @Override
    public Block next() {
      try {
        return new Block(Longs.fromByteArray(iter.key()), BlockMeta.parseFrom(iter.value()));
      } catch (Exception e) {
        // Deserialization failure is unrecoverable here; preserve the cause.
        throw new RuntimeException(e);
      } finally {
        iter.next();
      }
    }
  };
}
/**
 * Returns all locations recorded for the given block.
 *
 * <p>FIX: the {@code ReadOptions} and {@code RocksIterator} were leaked on
 * every call; both are native resources and are now managed with
 * try-with-resources.
 *
 * @param id the block id (stored as an 8-byte big-endian key prefix)
 * @return the locations for the block, empty if none are recorded
 */
@Override
public List<BlockLocation> getLocations(long id) {
  List<BlockLocation> locations = new ArrayList<>();
  try (ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
       RocksIterator iter = mDb.newIterator(mBlockLocationsColumn, readOptions)) {
    // Seek to the first key with this block id; prefixSameAsStart terminates
    // the scan once keys leave the prefix (requires a prefix extractor on
    // the column family -- TODO confirm).
    for (iter.seek(Longs.toByteArray(id)); iter.isValid(); iter.next()) {
      try {
        locations.add(BlockLocation.parseFrom(iter.value()));
      } catch (Exception e) {
        // Corrupt location record; preserve the cause.
        throw new RuntimeException(e);
      }
    }
  }
  return locations;
}
/**
 * Creates and initializes a rocks-backed inode store.
 *
 * <p>Loads the native RocksDB library, caches reusable write/read option
 * objects, and opens the database under the configured metastore directory.
 * (Fixed javadoc: this is the inode store, not the block store.)
 *
 * @param args inode store arguments
 */
public RocksInodeStore(InodeStoreArgs args) {
  mConf = args.getConf();
  mBaseDir = mConf.get(PropertyKey.MASTER_METASTORE_DIR);
  RocksDB.loadLibrary();
  // WAL disabled for metastore writes -- presumably recoverable from the
  // journal, so the WAL would only add overhead; TODO confirm rationale.
  mDisableWAL = new WriteOptions().setDisableWAL(true);
  mReadPrefixSameAsStart = new ReadOptions().setPrefixSameAsStart(true);
  try {
    initDb();
  } catch (RocksDBException e) {
    // Failure to open the DB is fatal at construction; rethrow unchecked
    // with the cause preserved.
    throw new RuntimeException(e);
  }
}
readOpts = new ReadOptions(); readOpts = readOpts.setPrefixSameAsStart(true) .setVerifyChecksums(false);
void scanRange(RocksDbKey start, RocksDbKey end, RocksDbScanCallback fn) { try (ReadOptions ro = new ReadOptions()) { ro.setTotalOrderSeek(true); RocksIterator iterator = db.newIterator(ro); for (iterator.seek(start.getRaw()); iterator.isValid(); iterator.next()) { RocksDbKey key = new RocksDbKey(iterator.key()); if (key.compareTo(end) >= 0) { // past limit, quit return; } RocksDbValue val = new RocksDbValue(iterator.value()); if (!fn.cb(key, val)) { // if cb returns false, we are done with this section of rows return; } } } }
ReadOptions ro = new ReadOptions(); ro.setTotalOrderSeek(true);
/**
 * Scan records by key range from a table.
 *
 * <p>NOTE(review): the ReadOptions created here is never closed in this
 * method; presumably the returned ColumnIterator takes ownership of the
 * native iterator and options -- confirm it releases both.
 */
@Override
public BackendColumnIterator scan(String table, byte[] keyFrom,
                                  byte[] keyTo, int scanType) {
  // Mutating scans over a dirty session would see inconsistent state.
  assert !this.hasChanges();
  ReadOptions options = new ReadOptions();
  options.setTotalOrderSeek(true); // Not sure if it must be set
  RocksIterator itor = rocksdb().newIterator(cf(table), options);
  // Range interpretation (inclusive/exclusive bounds) is driven by scanType.
  return new ColumnIterator(table, itor, keyFrom, keyTo, scanType);
}
}
/**
 * Scan records by key prefix from a table.
 *
 * <p>NOTE(review): the ReadOptions created here is never closed in this
 * method; presumably the returned ColumnIterator owns the native handles
 * -- confirm it releases both.
 */
@Override
public BackendColumnIterator scan(String table, byte[] prefix) {
  // Prefix scans over a dirty session would see inconsistent state.
  assert !this.hasChanges();
  ReadOptions options = new ReadOptions();
  // NOTE: Options.prefix_extractor is a prerequisite
  options.setPrefixSameAsStart(true);
  RocksIterator itor = rocksdb().newIterator(cf(table), options);
  // Null upper bound: iteration is limited by the prefix, not an end key.
  return new ColumnIterator(table, itor, prefix, null, SCAN_PREFIX_WITH_BEGIN);
}
/**
 * Constructor.
 *
 * <p>If {@code readOptions} is null, a default instance is created and this
 * store takes responsibility for closing it. Otherwise the caller retains
 * ownership and must invoke {@link org.rocksdb.RocksObject#close close()}
 * on the supplied {@code readOptions} and/or {@code writeBatch} — after this
 * instance is {@link #close}'d, of course.
 *
 * @param db database
 * @param readOptions read options, or null for the default
 * @param writeBatch batch for write operations, or null for none
 * @throws IllegalArgumentException if {@code db} is null
 */
public RocksDBKVStore(RocksDB db, ReadOptions readOptions, WriteBatch writeBatch) {
  this(db,
      readOptions == null ? new ReadOptions() : readOptions,
      readOptions == null,
      writeBatch);
}
/**
 * Builds RocksDb {@link ReadOptions} with the requested tailing mode.
 *
 * @param isTailing whether tailing mode is enabled on the options
 * @return a freshly allocated {@code ReadOptions}; the caller should close it
 */
public static ReadOptions buildReadOptions(boolean isTailing) {
  return new ReadOptions().setTailing(isTailing);
}
// Binds this store to a fixed snapshot: all reads go through ReadOptions
// pinned to it. The literal `true` is presumably the "close read options"
// flag (this instance owns the options it just created) -- confirm against
// the delegated superclass constructor.
private SnapshotRocksDBKVStore(RocksDB db, Snapshot snapshot) {
  super(db, new ReadOptions().setSnapshot(snapshot), true, null);
  this.snapshot = snapshot;
}
/**
 * Returns iterators reflecting a consistent database state across multiple
 * column families. The iterators are heap allocated and must be deleted
 * before the db is deleted.
 *
 * @param columnFamilyHandleList {@link java.util.List} of
 *     {@link org.rocksdb.ColumnFamilyHandle} instances to iterate over
 * @return {@link java.util.List} of {@link org.rocksdb.RocksIterator}
 *     instances, one per requested column family
 * @throws RocksDBException thrown if error happens in underlying
 *     native library.
 */
public List<RocksIterator> newIterators(
    final List<ColumnFamilyHandle> columnFamilyHandleList)
    throws RocksDBException {
  // Delegate to the overload, supplying default read options.
  return newIterators(columnFamilyHandleList, new ReadOptions());
}
private void writeKVStateMetaData() throws IOException { List<RegisteredKeyedBackendStateMetaInfo.Snapshot<?, ?>> metaInfoSnapshots = new ArrayList<>(stateBackend.kvStateInformation.size()); int kvStateId = 0; for (Map.Entry<String, Tuple2<ColumnFamilyHandle, RegisteredKeyedBackendStateMetaInfo<?, ?>>> column : stateBackend.kvStateInformation.entrySet()) { metaInfoSnapshots.add(column.getValue().f1.snapshot()); //retrieve iterator for this k/v states readOptions = new ReadOptions(); readOptions.setSnapshot(snapshot); kvStateIterators.add( new Tuple2<>(stateBackend.db.newIterator(column.getValue().f0, readOptions), kvStateId)); ++kvStateId; } KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>(stateBackend.getKeySerializer(), metaInfoSnapshots); serializationProxy.write(outputView); }
// Opens a full-table iterator positioned at the first key. Fill-cache is
// disabled so a complete scan does not evict hot entries from the block
// cache. NOTE(review): the ReadOptions instance is never closed -- confirm
// whether the iterator's lifecycle covers it.
private RocksDBIterator() {
  ReadOptions scanOptions = new ReadOptions().setFillCache(false);
  this.it = db.newIterator(scanOptions);
  this.it.seekToFirst();
}
// Lazily opens the database, configuring shared write/read options first.
// NOTE(review): initialization is not synchronized -- presumably accessed
// from a single thread; confirm before concurrent use.
private RocksDB db() {
  if (_db != null) {
    return _db;
  }
  writeOptions = new WriteOptions();
  writeOptions.setDisableWAL(true);
  writeOptions.setSync(false);
  // Reads populate the block cache but skip checksum verification.
  readOptions = new ReadOptions();
  readOptions.setFillCache(true).setVerifyChecksums(false);
  _db = _dbSupplier.get();
  return _db;
}
// Opens the database on first use, preparing reusable option objects.
// NOTE(review): lazy init is unsynchronized -- verify single-threaded access.
private RocksDB db() {
  if (_db != null) {
    return _db;
  }
  writeOptions = new WriteOptions();
  writeOptions.setDisableWAL(true);
  writeOptions.setSync(false);
  // Reads bypass the block cache (no fill) and skip checksum verification.
  readOptions = new ReadOptions();
  readOptions.setFillCache(false).setVerifyChecksums(false);
  _db = _dbSupplier.get();
  return _db;
}
// Returns a metadata iterator positioned at the first key matching the
// given prefix. Both native handles are handed to the returned iterator,
// presumably so it can close them when the iteration finishes -- its
// CloseableIterator contract suggests so.
private CloseableIterator<GeoWaveMetadata> prefixIterator(final byte[] prefix) {
  final ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
  final RocksIterator rocksIt = db.newIterator(readOptions);
  rocksIt.seek(prefix);
  return new RocksDBMetadataIterator(readOptions, rocksIt, requiresTimestamp, visibilityEnabled);
}
/**
 * Scan records by key range from a table.
 *
 * <p>NOTE(review): the ReadOptions allocated here is not closed in this
 * method; it appears the ColumnIterator assumes ownership of the native
 * iterator -- verify it also releases the options.
 */
@Override
public BackendColumnIterator scan(String table, byte[] keyFrom,
                                  byte[] keyTo, int scanType) {
  // Scanning with uncommitted session changes would yield stale results.
  assert !this.hasChanges();
  ReadOptions options = new ReadOptions();
  options.setTotalOrderSeek(true); // Not sure if it must be set
  RocksIterator itor = rocksdb().newIterator(cf(table), options);
  // scanType selects how the [keyFrom, keyTo] bounds are interpreted.
  return new ColumnIterator(table, itor, keyFrom, keyTo, scanType);
}
}
/**
 * Scan records by key prefix from a table.
 *
 * <p>NOTE(review): the ReadOptions allocated here is not closed in this
 * method; it appears the ColumnIterator assumes ownership of the native
 * handles -- verify it releases both.
 */
@Override
public BackendColumnIterator scan(String table, byte[] prefix) {
  // Scanning with uncommitted session changes would yield stale results.
  assert !this.hasChanges();
  ReadOptions options = new ReadOptions();
  // NOTE: Options.prefix_extractor is a prerequisite
  options.setPrefixSameAsStart(true);
  RocksIterator itor = rocksdb().newIterator(cf(table), options);
  // No explicit end key: the prefix bounds the iteration.
  return new ColumnIterator(table, itor, prefix, null, SCAN_PREFIX_WITH_BEGIN);
}
// Returns a row iterator over the entire table, or an empty iterator when
// no readable database exists yet. Fill-cache is disabled so a full scan
// does not evict hot entries; the returned iterator receives both native
// handles, presumably to close them when done.
public synchronized CloseableIterator<GeoWaveRow> iterator() {
  final RocksDB readDb = getReadDb();
  if (readDb == null) {
    return new CloseableIterator.Empty<>();
  }
  final ReadOptions readOptions = new ReadOptions().setFillCache(false);
  final RocksIterator rocksIt = readDb.newIterator(readOptions);
  rocksIt.seekToFirst();
  return new RocksDBRowIterator(
      readOptions,
      rocksIt,
      adapterId,
      partition,
      requiresTimestamp,
      visibilityEnabled);
}