/**
 * Flushes the memtable to disk (best effort) and disposes the native RocksDB handle.
 * A flush failure is logged but does not prevent disposal.
 */
public void close() {
    // try-with-resources releases the native FlushOptions handle
    // (the original `new FlushOptions()` was never closed — native memory leak).
    try (FlushOptions flushOptions = new FlushOptions()) {
        this.rocksDB.flush(flushOptions);
    } catch (RocksDBException e) {
        LOG.warn("Failed to flush db before cleanup", e);
    }
    this.rocksDB.dispose();
} }
/**
 * Flushes the memtable to disk (best effort) and disposes the native RocksDB handle.
 * A flush failure is logged but does not prevent disposal.
 */
@Override
public void close() {
    // try-with-resources releases the native FlushOptions handle
    // (the original `new FlushOptions()` was never closed — native memory leak).
    try (FlushOptions flushOptions = new FlushOptions()) {
        this.rocksDB.flush(flushOptions);
    } catch (RocksDBException e) {
        LOG.warn("Failed to flush db before cleanup", e);
    }
    this.rocksDB.dispose();
}
/**
 * Flush the data in memtable of RocksDB into disk, and then create checkpoint.
 *
 * @param checkpointId id of the checkpoint; used to derive the checkpoint directory
 * @throws RuntimeException if the flush or checkpoint creation fails
 */
@Override
public void checkpoint(long checkpointId) {
    long startTime = System.currentTimeMillis();
    // try-with-resources releases the native FlushOptions handle
    // (the original `new FlushOptions()` was never closed — native memory leak).
    try (FlushOptions flushOptions = new FlushOptions()) {
        rocksDB.flush(flushOptions);
        Checkpoint cp = Checkpoint.create(rocksDB);
        cp.createCheckpoint(getRocksDbCheckpointPath(checkpointId));
    } catch (RocksDBException e) {
        LOG.error(String.format("Failed to create checkpoint for checkpointId-%d", checkpointId), e);
        // Preserve the full cause chain; the original wrapped only e.getMessage(),
        // discarding the stack trace of the underlying RocksDBException.
        throw new RuntimeException(e);
    }
    if (isEnableMetrics && JStormMetrics.enabled)
        rocksDbFlushAndCpLatency.update(System.currentTimeMillis() - startTime);
}
// NOTE(review): fragment — the enclosing method and the remainder of this try
// block are outside the visible chunk. Opens FlushOptions via try-with-resources,
// configures a synchronous flush (wait for completion), then flushes store.db.
try (FlushOptions flushOps = new FlushOptions()) { flushOps.setWaitForFlush(true); store.db.flush(flushOps);
// NOTE(review): fragment — loop body cut at both ends; `i`, `startValue2`,
// `isFlush`, `flushWaitTime`, `isCheckpoint`, `cp` and `cpPath` are declared
// outside the visible chunk. Writes key (i % 1000) -> (startValue2 + i) into
// column family handler2; once the flush deadline has passed, flushes the
// memtable and optionally creates a checkpoint directory suffixed with i.
// Review note: the FlushOptions created here is never closed — confirm whether
// the enclosing code releases it; also assumes platform-default charset via
// String.getBytes() — TODO confirm intended.
db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes()); if (isFlush && flushWaitTime <= System.currentTimeMillis()) { db.flush(new FlushOptions()); if (isCheckpoint) { cp.createCheckpoint(cpPath + "/" + i);
/**
 * Synchronously flushes every underlying RocksDB instance to disk.
 *
 * @throws IOException if the hashtable is already closed, or if any flush fails
 *                     (the RocksDBException is wrapped as the cause)
 */
@Override
public void sync() throws IOException {
    syncLock.lock();
    try {
        if (this.isClosed()) {
            throw new IOException("hashtable [" + this.fileName + "] is close");
        }
        // One shared FlushOptions for all dbs, released by try-with-resources
        // (the original allocated a fresh FlushOptions per db inside the loop
        // and never closed any of them — native memory leak).
        try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            for (RocksDB db : dbs) {
                db.flush(flushOptions);
            }
        } catch (RocksDBException e) {
            throw new IOException(e);
        }
    } finally {
        syncLock.unlock();
    }
}
/**
 * Writes the batched entries for the given key space to RocksDB (WAL disabled)
 * and synchronously flushes that key space's column family to disk.
 *
 * @param keySpace name of the key space whose pending batch should be flushed; must not be null
 * @throws RuntimeException wrapping any RocksDBException from the write or flush
 */
@Override
public void flush(String keySpace) {
    Preconditions.checkNotNull(keySpace);
    ByteArray byteArray = new ByteArray(keySpace);
    // try-with-resources closes all native handles even when write/flush throws
    // (the original leaked writeBatch/writeOptions on the exception path).
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions().setDisableWAL(true);
         FlushOptions fOptions = new FlushOptions().setWaitForFlush(true)) {
        for (Map.Entry<ByteArray, byte[]> entry : dataBatches.get(byteArray).entrySet()) {
            writeBatch.put(
                cfHandles.get(byteArray),
                entry.getKey().getBytes(),
                entry.getValue()
            );
        }
        rocksDB.write(writeOptions, writeBatch);
        dataBatches.get(byteArray).clear();
        // BUG FIX: the original configured fOptions with setWaitForFlush(true)
        // but then passed a brand-new FlushOptions() to flush(), silently
        // ignoring the wait setting and leaking the extra native object.
        rocksDB.flush(fOptions, cfHandles.get(byteArray));
    } catch (RocksDBException ex) {
        throw new RuntimeException(ex);
    }
}
/**
 * Shutdown hook body: best-effort synchronous flush of the database, then
 * unconditional release of the native DB and options handles.
 */
public void run() {
    FlushOptions waitingFlush = new FlushOptions();
    waitingFlush.setWaitForFlush(true);
    try {
        finalDB.flush(waitingFlush);
    } catch (RocksDBException e) {
        // A failed final flush is logged, never propagated — teardown must proceed.
        LOGGER.warn("RocksDB flush error", e);
    } finally {
        // Same teardown order as before: the DB first, then its options objects.
        finalDB.close();
        waitingFlush.close();
        options.close();
    }
} });
/**
 * Writes every pending batch (one per key space) to RocksDB with the WAL
 * disabled, clears each batch, then synchronously flushes the whole database.
 *
 * @throws RuntimeException wrapping any RocksDBException from a write or the flush
 */
@Override
public void flush() {
    // try-with-resources releases every native handle even on the exception
    // path (the original leaked writeBatch/writeOptions if write() threw).
    try (FlushOptions fOptions = new FlushOptions().setWaitForFlush(true)) {
        for (Map.Entry<ByteArray, Map<ByteArray, byte[]>> entry : dataBatches.entrySet()) {
            try (WriteBatch writeBatch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions().setDisableWAL(true)) {
                for (Map.Entry<ByteArray, byte[]> batchEntry : entry.getValue().entrySet()) {
                    writeBatch.put(
                        cfHandles.get(entry.getKey()),
                        batchEntry.getKey().getBytes(),
                        batchEntry.getValue()
                    );
                }
                rocksDB.write(writeOptions, writeBatch);
            }
            entry.getValue().clear();
        }
        rocksDB.flush(fOptions);
    } catch (RocksDBException ex) {
        throw new RuntimeException(ex);
    }
}
/**
 * Marks the hashtable closed, then flushes and closes every underlying RocksDB
 * instance, reporting progress on a console progress bar. Per-db failures are
 * logged and do not abort the shutdown of the remaining dbs.
 */
@Override
public void close() {
    this.syncLock.lock();
    try {
        this.closed = true;
        CommandLineProgressBar bar = new CommandLineProgressBar("Closing Hash Tables", dbs.length, System.out);
        int i = 0;
        for (RocksDB db : dbs) {
            // try-with-resources releases the native FlushOptions handle
            // (the original allocated one per db and never closed it).
            try (FlushOptions op = new FlushOptions()) {
                op.setWaitForFlush(true);
                db.flush(op);
                db.close();
            } catch (Exception e) {
                SDFSLogger.getLog().warn("While closing hashtable ", e);
            }
            bar.update(i);
            i++;
        }
        bar.finish();
    } finally {
        this.syncLock.unlock();
        SDFSLogger.getLog().info("Hashtable [" + this.fileName + "] closed");
    }
}
/**
 * Closes this store exactly once: removes its metrics (if collected), deletes
 * all open iterators, synchronously flushes the column family, and releases
 * the column-family handle. Flush failures are recorded on the deferred
 * close-handler rather than thrown.
 */
@Override
public void close() throws IOException {
    // Idempotent: only the first caller performs the shutdown work.
    if (!closed.compareAndSet(false, true)) {
        return;
    }
    if (COLLECT_METRICS) {
        MetricUtils.removeAllMetricsThatStartWith(MetricRegistry.name(METRICS_PREFIX, name));
    }
    exclusively((deferred) -> {
        deleteAllIterators(deferred);
        // Wait for the flush to finish before the handle is released below.
        try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            db.flush(flushOptions, handle);
        } catch (RocksDBException ex) {
            deferred.addException(ex);
        }
        deferred.suppressingClose(handle);
    });
}
// NOTE(review): fragment — `wOptions` and the `fOptions` field are declared
// outside the visible chunk. Disables the write-ahead log on the write options
// and configures flushes to block until completion.
wOptions.setDisableWAL(true); fOptions = new FlushOptions(); fOptions.setWaitForFlush(true);
// NOTE(review): fragment — `flushOpts` is declared outside the visible chunk.
// Creates flush options that make flush calls block until the flush completes.
flushOpts = new FlushOptions(); flushOpts.setWaitForFlush(true);
// NOTE(review): fragment — tail of an argument list whose call site starts on
// lines not visible here; passes default write/flush options plus a metrics
// object registered under the name "dbStore".
new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
// NOTE(review): fragment — tail of an argument list whose call site starts on
// lines not visible here; passes default write/flush options plus a metrics
// object registered under the name "dbStore".
new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));