private void processBatchInsert(TreeMap<RocksDbKey, RocksDbValue> batchMap) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch()) {
        // take the batched metric data and write to the database
        for (Map.Entry<RocksDbKey, RocksDbValue> entry : batchMap.entrySet()) {
            writeBatch.put(entry.getKey().getRaw(), entry.getValue().getRaw());
        }
        store.db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        String message = "Failed to store data to RocksDB";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }
}
@Override
public void clear() {
    try {
        try (RocksIteratorWrapper iterator =
                 RocksDBKeyedStateBackend.getRocksIterator(backend.db, columnFamily);
             WriteBatch writeBatch = new WriteBatch(128)) {
            final byte[] keyPrefixBytes = serializeCurrentKeyWithGroupAndNamespace();
            iterator.seek(keyPrefixBytes);
            // delete every entry under the current key prefix; stop at the first mismatch
            while (iterator.isValid()) {
                byte[] keyBytes = iterator.key();
                if (startWithKeyPrefix(keyPrefixBytes, keyBytes)) {
                    writeBatch.remove(columnFamily, keyBytes);
                } else {
                    break;
                }
                iterator.next();
            }
            backend.db.write(writeOptions, writeBatch);
        }
    } catch (Exception e) {
        LOG.warn("Error while cleaning the state.", e);
    }
}
@Override
public void removeChild(Long parentId, String childName) {
    try {
        mBatch.delete(mEdgesColumn, RocksUtils.toByteArray(parentId, childName));
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
void deleteMetrics(FilterOptions filter) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // collect every matching key into the batch, then delete them all in one write
        scanRaw(filter, (RocksDbKey key, RocksDbValue value) -> {
            writeBatch.remove(key.getRaw());
            return true;
        });
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metrics", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metrics";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
private void restoreAllInternal(final Collection<KeyValue<byte[], byte[]>> records) {
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KeyValue<byte[], byte[]> record : records) {
            // a null value is a tombstone: delete the key instead of writing it
            if (record.value == null) {
                batch.delete(record.key);
            } else {
                batch.put(record.key, record.value);
            }
        }
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + name, e);
    }
}
@Override
public void putBatch(Map<String, Object> map) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = serialize(value);
            // skip blank keys and empty values rather than writing them
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(keyByte, data);
        }
        db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        // dispose() is the pre-5.x RocksJava release method; newer versions use close()
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
}
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB,
                                @Nullable WriteOptions options,
                                int capacity) {
    Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
        "capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);
    this.db = rocksDB;
    this.options = options;
    this.capacity = capacity;
    // pre-size the batch: reserve an estimated number of bytes per buffered record
    this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
}
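This constructor is one piece of a capacity-bounded batching wrapper (its flush() and close() methods appear further down). A minimal sketch of the overall pattern, with assumed values for MIN_CAPACITY, MAX_CAPACITY, and PER_RECORD_BYTES (the real class defines its own):

import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

// Sketch only: buffers puts and writes them out once `capacity` records accumulate.
public class BatchingWriter implements AutoCloseable {
    private static final int MIN_CAPACITY = 100;      // assumed lower bound
    private static final int MAX_CAPACITY = 1000;     // assumed upper bound
    private static final int PER_RECORD_BYTES = 100;  // assumed per-record size hint

    private final RocksDB db;
    private final WriteOptions options;
    private final int capacity;
    private final WriteBatch batch;

    public BatchingWriter(RocksDB db, int capacity) {
        if (capacity < MIN_CAPACITY || capacity > MAX_CAPACITY) {
            throw new IllegalArgumentException("capacity out of range: " + capacity);
        }
        this.db = db;
        this.options = new WriteOptions();
        this.capacity = capacity;
        this.batch = new WriteBatch(capacity * PER_RECORD_BYTES);
    }

    public void put(byte[] key, byte[] value) throws RocksDBException {
        batch.put(key, value);
        if (batch.count() >= capacity) {  // flush once the buffer is full
            db.write(options, batch);
            batch.clear();
        }
    }

    @Override
    public void close() throws RocksDBException {
        if (batch.count() > 0) {          // flush any remaining buffered records
            db.write(options, batch);
        }
        batch.close();
        options.close();
    }
}

The design amortizes the per-write overhead: callers see a simple put(), while the database only sees one write per `capacity` records.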
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled())
            logger.trace("~> RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        try {
            try (WriteBatch batch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
                    if (entry.getValue() == null) {
                        batch.remove(entry.getKey());
                    } else {
                        batch.put(entry.getKey(), entry.getValue());
                    }
                }
                db.write(writeOptions, batch);
            }
            if (logger.isTraceEnabled())
                logger.trace("<~ RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
@Override
public void flush() {
    try {
        for (Map.Entry<ByteArray, Map<ByteArray, byte[]>> entry : dataBatches.entrySet()) {
            // WAL is disabled: durability comes from the explicit memtable flush below
            try (WriteBatch writeBatch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions().setDisableWAL(true)) {
                for (Map.Entry<ByteArray, byte[]> batchEntry : entry.getValue().entrySet()) {
                    writeBatch.put(cfHandles.get(entry.getKey()),
                        batchEntry.getKey().getBytes(),
                        batchEntry.getValue());
                }
                rocksDB.write(writeOptions, writeBatch);
            }
            entry.getValue().clear();
        }
        // force the memtables to disk so the un-WAL-ed writes are persisted
        try (FlushOptions fOptions = new FlushOptions().setWaitForFlush(true)) {
            rocksDB.flush(fOptions);
        }
    } catch (RocksDBException ex) {
        throw new RuntimeException(ex);
    }
}
@Override
public void addChild(Long parentId, String childName, Long childId) {
    try {
        mBatch.put(mEdgesColumn, RocksUtils.toByteArray(parentId, childName),
            Longs.toByteArray(childId));
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
try (final WriteBatch batch = new WriteBatch()) {
    // Range removals (the enclosing loop over the mutation's key ranges is elided):
    // a range whose bounds are consecutive contains exactly one key, so delete it
    // directly; otherwise iterate over the range and delete each key found.
    final byte[] max = range.getMax();
    if (min != null && max != null && ByteUtil.isConsecutive(min, max)) {
        batch.remove(min);
    } else {
        try (RocksDBKVStore.Iterator i =
                 this.kv.createIterator(iteratorOptions, min, max, false)) {
            while (i.hasNext()) {
                batch.remove(i.next().getKey());
            }
        }
    }

    // Puts (one per put entry; loop elided)
    batch.put(entry.getKey(), entry.getValue());

    // Counter adjustments, applied as merges (one per adjust entry; loop elided)
    batch.merge(entry.getKey(), this.kv.encodeCounter(entry.getValue()));
}
int rndByteOffset = 0;
final WriteBatch batch = new WriteBatch();
final WriteOptions opt = new WriteOptions();
for (int i = 0; i < keys.length; i++) {
    final int key = keys[i];
    wvb.putInt(0, key);                          // value buffer starts with the key
    batch.put(wkb.byteArray(), wvb.byteArray()); // key buffer (wkb) is filled elsewhere
    if (i % batchSize == 0) {
        // flush a full batch, then reuse it
        try {
            db.write(opt, batch);
        } catch (final RocksDBException ex) {
            throw new IOException(ex);
        }
        batch.clear();
    }
}
// flush the final, possibly partial batch
try {
    db.write(opt, batch);
} catch (final RocksDBException ex) {
    throw new IOException(ex);
}
batch.clear();
public @Override void clear() {
    try (WriteBatch batch = new WriteBatch()) {
        try (RocksIterator it = db.newIterator()) {
            it.seekToFirst();
            while (it.isValid()) {
                byte[] key = it.key();
                batch.delete(key);
                it.next();
            }
        }
        try (WriteOptions opts = new WriteOptions()) {
            db.write(opts, batch);
        }
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    } finally {
        size.set(0);
    }
}
@Override
public void remove() {
    if (currentEntry == null || currentEntry.deleted) {
        throw new IllegalStateException(
            "The remove operation must be called after a valid next operation.");
    }
    currentEntry.remove();
}
/**
 * Commit all updates (put/delete) to the DB.
 */
@Override
public Integer commit() {
    int count = this.batch.count();
    if (count <= 0) {
        return 0;
    }
    try {
        rocksdb().write(this.writeOptions, this.batch);
    } catch (RocksDBException e) {
        //this.batch.rollbackToSavePoint();
        throw new BackendException(e);
    }
    // Clear the batch if write() succeeded (the batch is retained if it failed)
    this.batch.clear();
    return count;
}
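The commented-out rollbackToSavePoint() call above points at WriteBatch's savepoint API, which can discard just the tail of a batch. A minimal sketch of that mechanism (the db handle and the abort condition are illustrative):

import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

// Sketch: stage two writes, roll the second one back before committing.
static void savepointDemo(RocksDB db, boolean abortSecondWrite) throws RocksDBException {
    try (WriteBatch batch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        batch.put("k1".getBytes(), "v1".getBytes());

        batch.setSavePoint();                 // remember the batch state after k1
        batch.put("k2".getBytes(), "v2".getBytes());
        if (abortSecondWrite) {
            batch.rollbackToSavePoint();      // drops k2, keeps k1
        }
        db.write(writeOptions, batch);        // commit whatever remains in the batch
    }
}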
public void flush() throws RocksDBException {
    if (options != null) {
        db.write(options, batch);
    } else {
        // fall back to the default WriteOptions if none was provided
        try (WriteOptions writeOptions = new WriteOptions()) {
            db.write(writeOptions, batch);
        }
    }
    batch.clear();
}
/**
 * deleteBlind should be called as the last of the delete operations:
 * {@link WriteBatch#deleteRange(byte[], byte[])} treats its end key as
 * exclusive, so endKey has to be modified to make the range cover it.
 */
void deleteBlind(WriteBatch batch, byte[] key, @Nullable byte[] endKey) {
    try {
        if (null == endKey) {
            batch.delete(key);
        } else {
            Pair<byte[], byte[]> realRange = getRealRange(key, endKey);
            endKey = realRange.getRight();
            // bump the last byte so the (exclusive) upper bound lies past endKey
            ++endKey[endKey.length - 1];
            batch.deleteRange(realRange.getLeft(), endKey);
        }
    } catch (RocksDBException e) {
        throw new StateStoreRuntimeException(e);
    }
}
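To make the exclusive upper bound concrete, a small illustrative example (the keys here are hypothetical):

import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;

// deleteRange(begin, end) removes keys in [begin, end) -- end itself survives.
static void deleteRangeDemo() throws RocksDBException {
    try (WriteBatch batch = new WriteBatch()) {
        byte[] begin = "key1".getBytes();
        byte[] end = "key3".getBytes();
        batch.deleteRange(begin, end);  // deletes key1, key2, ... but NOT key3

        end[end.length - 1]++;          // "key3" becomes "key4"
        batch.deleteRange(begin, end);  // key3 now falls inside [key1, key4)
    }
}

Note that the simple increment only works while the last byte is below 0xFF; otherwise it overflows and the range shrinks instead of growing, so a robust implementation has to handle that case.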
@Override
public void close() throws RocksDBException {
    // push out any buffered records before releasing the batch
    if (batch.count() != 0) {
        flush();
    }
    IOUtils.closeQuietly(batch);
}