/**
 * Enables or disables checksum verification on reads, delegating to the
 * superclass and narrowing the return type to {@link RocksDbReadOptions}
 * so fluent call chains keep the subclass type.
 *
 * @param verifyChecksums whether data read from storage is verified against
 *            its stored checksums
 * @return this options instance, for call chaining
 */
@Override
public RocksDbReadOptions setVerifyChecksums(boolean verifyChecksums) {
    super.setVerifyChecksums(verifyChecksums);
    return this;
}
/**
 * If true, all data read from underlying storage will be verified against the
 * corresponding checksums.
 * Default: true
 *
 * @param verifyChecksums if true, then checksum verification will be performed
 *            on every read.
 * @return the reference to the current ReadOptions.
 */
public ReadOptions setVerifyChecksums(final boolean verifyChecksums) {
    // Only valid while this object still owns its native handle.
    assert isOwningHandle();
    setVerifyChecksums(nativeHandle_, verifyChecksums);
    return this;
}
/**
 * Returns the database handle, lazily creating it (together with the shared
 * read/write options) on first access.
 * <p>
 * Writes skip the WAL and are asynchronous; reads do not populate the block
 * cache and skip checksum verification.
 */
private RocksDB db() {
    if (_db != null) {
        return _db;
    }
    writeOptions = new WriteOptions();
    writeOptions.setDisableWAL(true);
    writeOptions.setSync(false);

    readOptions = new ReadOptions();
    readOptions.setFillCache(false);
    readOptions.setVerifyChecksums(false);

    _db = _dbSupplier.get();
    return _db;
}
/**
 * Returns the database handle, lazily creating it (together with the shared
 * read/write options) on first access.
 * <p>
 * Writes skip the WAL and are asynchronous; reads populate the block cache
 * but skip checksum verification.
 */
private RocksDB db() {
    if (_db != null) {
        return _db;
    }
    writeOptions = new WriteOptions();
    writeOptions.setDisableWAL(true);
    writeOptions.setSync(false);

    readOptions = new ReadOptions();
    readOptions.setFillCache(true);
    readOptions.setVerifyChecksums(false);

    _db = _dbSupplier.get();
    return _db;
}
protected synchronized void open(Set<String> columnFamilyNames) { if (isOpen()) { return; } Map<String, String> defaultMetadata = ImmutableMap.of("version", RocksdbStorageProvider.VERSION, "serializer", "proxy"); DBConfig address = new DBConfig(path, readOnly, defaultMetadata, columnFamilyNames); this.dbhandle = RocksConnectionManager.INSTANCE.acquire(address); this.bulkReadOptions = new ReadOptions(); this.bulkReadOptions.setFillCache(false); this.bulkReadOptions.setVerifyChecksums(false); RevObjectSerializer defaultSerializer = new RevObjectSerializerProxy(); RevObjectSerializer serializer = defaultSerializer; final Optional<String> serializerValue = dbhandle.getMetadata("serializer"); if (serializerValue.isPresent()) { String sval = serializerValue.get(); Preconditions.checkState("proxy".equals(sval), "serialization factory metadata error: expected 'proxy', got '%s'", sval); } else { // pre 1.0 serializer, for backwards compatibility with repos created before initial // release serializer = new RevObjectSerializerLZF(DataStreamRevObjectSerializerV2.INSTANCE); } super.setSerializationFactory(serializer); open = true; }
protected synchronized void open(Set<String> columnFamilyNames) { if (isOpen()) { return; } Map<String, String> defaultMetadata = ImmutableMap.of("version", RocksdbStorageProvider.VERSION, "serializer", "proxy"); DBConfig address = new DBConfig(path, readOnly, defaultMetadata, columnFamilyNames); this.dbhandle = RocksConnectionManager.INSTANCE.acquire(address); this.bulkReadOptions = new ReadOptions(); this.bulkReadOptions.setFillCache(false); this.bulkReadOptions.setVerifyChecksums(false); ObjectSerializingFactory defaultSerializer = new SerializationFactoryProxy(); ObjectSerializingFactory serializer = defaultSerializer; final Optional<String> serializerValue = dbhandle.getMetadata("serializer"); if (serializerValue.isPresent()) { String sval = serializerValue.get(); Preconditions.checkState("proxy".equals(sval), "serialization factory metadata error: expected 'proxy', got '%s'", sval); } else { // pre 1.0 serializer, for backwards compatibility with repos created before initial // release serializer = new LZFSerializationFactory(DataStreamSerializationFactoryV2.INSTANCE); } super.setSerializationFactory(serializer); open = true; }
@Override public void deleteAll(Iterator<ObjectId> ids, BulkOpListener listener) { checkNotNull(ids, "argument objectId is null"); checkNotNull(listener, "argument listener is null"); checkWritable(); final boolean checkExists = !BulkOpListener.NOOP_LISTENER.equals(listener); byte[] keybuff = new byte[ObjectId.NUM_BYTES]; try (RocksDBReference dbRef = dbhandle.getReference(); ReadOptions ro = new ReadOptions()) { ro.setFillCache(false); ro.setVerifyChecksums(false); try (WriteOptions writeOps = new WriteOptions(); // WriteBatch batch = new WriteBatch()) { writeOps.setSync(true); while (ids.hasNext()) { ObjectId id = ids.next(); id.getRawValue(keybuff); if (!checkExists || exists(dbRef, ro, keybuff)) { batch.delete(keybuff); listener.deleted(id); } else { listener.notFound(id); } } dbRef.db().write(writeOps, batch); } catch (RocksDBException e) { throw new RuntimeException(e); } } }
// NOTE(review): this is an incomplete fragment — it duplicates the
// resource-acquisition prelude of deleteAll() and its try block is not
// closed within this span; confirm against the full file before editing.
ro.setVerifyChecksums(false); // existence probes skip checksum verification
try (WriteOptions writeOps = new WriteOptions(); //
        WriteBatch batch = new WriteBatch()) {
public RocksdbNodeStore(RocksDB db) { this.db = db; try { // enable bloom filter to speed up RocksDB.get() calls BlockBasedTableConfig tableFormatConfig = new BlockBasedTableConfig(); bloomFilter = new BloomFilter(); tableFormatConfig.setFilter(bloomFilter); colFamilyOptions = new ColumnFamilyOptions(); colFamilyOptions.setTableFormatConfig(tableFormatConfig); byte[] tableNameKey = "nodes".getBytes(Charsets.UTF_8); ColumnFamilyDescriptor columnDescriptor = new ColumnFamilyDescriptor(tableNameKey, colFamilyOptions); column = db.createColumnFamily(columnDescriptor); } catch (RocksDBException e) { throw Throwables.propagate(e); } this.writeOptions = new WriteOptions(); writeOptions.setDisableWAL(true); writeOptions.setSync(false); readOptions = new ReadOptions(); readOptions.setFillCache(false).setVerifyChecksums(false); }
public RocksdbDAGStore(RocksDB db) { this.db = db; try { // enable bloom filter to speed up RocksDB.get() calls BlockBasedTableConfig tableFormatConfig = new BlockBasedTableConfig(); bloomFilter = new BloomFilter(); tableFormatConfig.setFilter(bloomFilter); colFamilyOptions = new ColumnFamilyOptions(); colFamilyOptions.setTableFormatConfig(tableFormatConfig); byte[] tableNameKey = "trees".getBytes(Charsets.UTF_8); ColumnFamilyDescriptor columnDescriptor = new ColumnFamilyDescriptor(tableNameKey, colFamilyOptions); column = db.createColumnFamily(columnDescriptor); } catch (RocksDBException e) { throw Throwables.propagate(e); } writeOptions = new WriteOptions(); writeOptions.setDisableWAL(true); writeOptions.setSync(false); readOptions = new ReadOptions(); readOptions.setFillCache(false).setVerifyChecksums(false); }