flushOps.setWaitForFlush(true); store.db.flush(flushOps); } catch (RocksDBException e) {
/**
 * Controls whether a flush call blocks until the background flush finishes.
 *
 * @param waitForFlush {@code true} if flush operations should wait for the
 *     flush process to terminate before returning.
 * @return this {@code FlushOptions} instance, enabling call chaining.
 */
public FlushOptions setWaitForFlush(final boolean waitForFlush) {
  // Must still own the native handle; forwarding to the JNI setter.
  assert isOwningHandle();
  setWaitForFlush(nativeHandle_, waitForFlush);
  return this;
}
/**
 * Flushes every underlying RocksDB instance to disk, blocking until each
 * flush completes.
 *
 * @throws IOException if the table is already closed or a RocksDB flush fails
 *     (the {@link RocksDBException} is wrapped as the cause).
 */
@Override
public void sync() throws IOException {
    syncLock.lock();
    try {
        if (this.isClosed()) {
            throw new IOException("hashtable [" + this.fileName + "] is close");
        }
        // FIX: FlushOptions wraps a native handle; the original allocated one
        // per DB and never closed any of them. A single try-with-resources
        // instance is reused for all DBs and always released.
        try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            for (RocksDB db : dbs) {
                db.flush(flushOptions);
            }
        } catch (RocksDBException e) {
            throw new IOException(e);
        }
    } finally {
        syncLock.unlock();
    }
}
/**
 * Writes every buffered batch to its column family (WAL disabled), clears the
 * in-memory buffers, then flushes RocksDB memtables to disk, waiting for
 * completion.
 *
 * @throws RuntimeException wrapping any {@link RocksDBException}.
 */
@Override
public void flush() {
    try {
        for (Map.Entry<ByteArray, Map<ByteArray, byte[]>> entry : dataBatches.entrySet()) {
            // FIX: WriteBatch/WriteOptions hold native memory and previously
            // leaked if rocksDB.write() threw; try-with-resources always
            // releases them.
            try (WriteBatch writeBatch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions().setDisableWAL(true)) {
                for (Map.Entry<ByteArray, byte[]> batchEntry : entry.getValue().entrySet()) {
                    writeBatch.put(
                        cfHandles.get(entry.getKey()),
                        batchEntry.getKey().getBytes(),
                        batchEntry.getValue());
                }
                rocksDB.write(writeOptions, writeBatch);
            }
            // Only clear the buffer once its contents were written successfully.
            entry.getValue().clear();
        }
        // FIX: FlushOptions also leaked when rocksDB.flush() threw.
        try (FlushOptions fOptions = new FlushOptions().setWaitForFlush(true)) {
            rocksDB.flush(fOptions);
        }
    } catch (RocksDBException ex) {
        throw new RuntimeException(ex);
    }
}
/**
 * Flushes the database's memtables to disk (blocking until done), then shuts
 * down the database and releases the native option objects.
 */
public void run() {
    final FlushOptions flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);
    try {
        finalDB.flush(flushOpts);
    } catch (RocksDBException e) {
        // Best effort: a failed flush is logged, shutdown proceeds regardless.
        LOGGER.warn("RocksDB flush error", e);
    } finally {
        // Close order preserved: database first, then its option handles.
        finalDB.close();
        flushOpts.close();
        options.close();
    }
} });
/**
 * Writes the buffered batch for {@code keySpace} to its column family (WAL
 * disabled), clears the buffer, then flushes that column family to disk,
 * waiting for completion.
 *
 * @param keySpace name of the key space to flush; must not be null.
 * @throws RuntimeException wrapping any {@link RocksDBException}.
 */
@Override
public void flush(String keySpace) {
    Preconditions.checkNotNull(keySpace);
    ByteArray byteArray = new ByteArray(keySpace);
    try {
        // FIX: WriteBatch/WriteOptions previously leaked if write() threw.
        try (WriteBatch writeBatch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions().setDisableWAL(true)) {
            for (Map.Entry<ByteArray, byte[]> entry : dataBatches.get(byteArray).entrySet()) {
                writeBatch.put(
                    cfHandles.get(byteArray),
                    entry.getKey().getBytes(),
                    entry.getValue());
            }
            rocksDB.write(writeOptions, writeBatch);
        }
        dataBatches.get(byteArray).clear();
        // BUG FIX: the original configured fOptions with setWaitForFlush(true)
        // but then called rocksDB.flush(new FlushOptions(), ...) — a fresh,
        // unconfigured, never-closed instance — so the flush did not wait.
        // Pass the configured options and close them deterministically.
        try (FlushOptions fOptions = new FlushOptions().setWaitForFlush(true)) {
            rocksDB.flush(fOptions, cfHandles.get(byteArray));
        }
    } catch (RocksDBException ex) {
        throw new RuntimeException(ex);
    }
}
/**
 * Marks the table closed, then flushes and closes every underlying RocksDB
 * instance, reporting progress on a command-line bar. Per-DB failures are
 * logged and do not abort the shutdown of the remaining DBs.
 */
@Override
public void close() {
    this.syncLock.lock();
    try {
        this.closed = true;
        CommandLineProgressBar bar =
            new CommandLineProgressBar("Closing Hash Tables", dbs.length, System.out);
        int i = 0;
        for (RocksDB db : dbs) {
            // FIX: FlushOptions wraps a native handle and was never closed in
            // the original; try-with-resources releases it on every path.
            try (FlushOptions op = new FlushOptions()) {
                op.setWaitForFlush(true);
                db.flush(op);
                db.close();
            } catch (Exception e) {
                // Best effort: log and continue closing the remaining DBs.
                SDFSLogger.getLog().warn("While closing hashtable ", e);
            }
            bar.update(i);
            i++;
        }
        bar.finish();
    } finally {
        this.syncLock.unlock();
        SDFSLogger.getLog().info("Hashtable [" + this.fileName + "] closed");
    }
}
/**
 * Closes this store exactly once: removes its metrics (if enabled), deletes
 * all open iterators, flushes the column family to disk (waiting for
 * completion), and closes the column-family handle. Flush errors are
 * collected on the deferred closer rather than thrown.
 *
 * @throws IOException propagated from the exclusive-close machinery.
 */
@Override
public void close() throws IOException {
    // Idempotent: only the first caller wins the CAS and performs shutdown.
    if (!closed.compareAndSet(false, true)) {
        return;
    }
    if (COLLECT_METRICS) {
        MetricUtils.removeAllMetricsThatStartWith(MetricRegistry.name(METRICS_PREFIX, name));
    }
    exclusively((deferred) -> {
        deleteAllIterators(deferred);
        try (FlushOptions flushOptions = new FlushOptions()) {
            flushOptions.setWaitForFlush(true);
            db.flush(flushOptions, handle);
        } catch (RocksDBException ex) {
            deferred.addException(ex);
        }
        deferred.suppressingClose(handle);
    });
}
fOptions.setWaitForFlush(true);
flushOpts.setWaitForFlush(true);