/**
 * Queues a delete for {@code key} in the given column family and flushes the
 * accumulated batch once it reaches capacity.
 *
 * @param handle column family to delete from
 * @param key    key to delete
 * @throws RocksDBException if queueing the delete or flushing the batch fails
 */
public void remove(
        @Nonnull ColumnFamilyHandle handle,
        @Nonnull byte[] key) throws RocksDBException {
    batch.remove(handle, key);
    // >= rather than == so the batch still flushes even if the count ever
    // overshoots capacity (e.g. capacity is lowered, or another writer adds
    // multiple operations between checks).
    if (batch.count() >= capacity) {
        flush();
    }
}
/**
 * Removes the entry returned by the most recent {@code next()} call.
 *
 * @throws IllegalStateException if {@code next()} has not been called yet or
 *         the current entry was already removed
 */
@Override
public void remove() {
    final boolean removable = currentEntry != null && !currentEntry.deleted;
    if (!removable) {
        throw new IllegalStateException("The remove operation must be called after a valid next operation.");
    }
    currentEntry.remove();
}
/**
 * Applies {@code rows} to the database atomically via a single WriteBatch:
 * entries with a {@code null} value are deleted, all others are upserted.
 *
 * @param rows key/value pairs to write; a {@code null} value marks the key for deletion
 * @throws RuntimeException wrapping any {@link RocksDBException} from the write
 */
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        // Parameterized logging: no string concatenation when TRACE is disabled,
        // so the explicit isTraceEnabled() guards are unnecessary.
        logger.trace("~> RocksDbDataSource.updateBatch(): {}, {}", name, rows.size());
        try {
            try (WriteBatch batch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
                    if (entry.getValue() == null) {
                        batch.remove(entry.getKey());
                    } else {
                        batch.put(entry.getKey(), entry.getValue());
                    }
                }
                db.write(writeOptions, batch);
            }
            logger.trace("<~ RocksDbDataSource.updateBatch(): {}, {}", name, rows.size());
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
/**
 * Deletes every metric matched by {@code filter}.
 *
 * <p>Matching keys are staged into one WriteBatch during the scan and
 * committed with a single write; write failures are counted on the failure
 * meter and rethrown as {@link MetricException}.
 *
 * @param filter selects the metrics to delete
 * @throws MetricException if the batched write fails
 */
void deleteMetrics(FilterOptions filter) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // Stage a delete for each visited key; returning true keeps the scan going.
        scanRaw(filter, (RocksDbKey key, RocksDbValue value) -> {
            writeBatch.remove(key.getRaw());
            return true;
        });
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metrics", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed delete metrics";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
/**
 * Deletes all rows belonging to the current key, key-group and namespace by
 * scanning the serialized key prefix and removing every matching entry in a
 * single batch. Errors are logged and swallowed.
 */
@Override
public void clear() {
    try (RocksIteratorWrapper iterator =
                 RocksDBKeyedStateBackend.getRocksIterator(backend.db, columnFamily);
         WriteBatch writeBatch = new WriteBatch(128)) {
        final byte[] keyPrefixBytes = serializeCurrentKeyWithGroupAndNamespace();
        // Walk forward from the prefix; the first key outside it ends the scan.
        for (iterator.seek(keyPrefixBytes); iterator.isValid(); iterator.next()) {
            final byte[] keyBytes = iterator.key();
            if (!startWithKeyPrefix(keyPrefixBytes, keyBytes)) {
                break;
            }
            writeBatch.remove(columnFamily, keyBytes);
        }
        backend.db.write(writeOptions, writeBatch);
    } catch (Exception e) {
        LOG.warn("Error while cleaning the state.", e);
    }
}
// Stage a delete of this key's raw bytes in the pending write batch.
writeBatch.remove(key.getRaw());
/**
 * Delete a record by key from a table.
 *
 * @param table name of the table whose column family holds the record
 * @param key   raw key of the record to stage for deletion
 */
@Override
public void remove(String table, byte[] key) {
    batch.remove(cf(table), key);
}
/**
 * Delete a record by key from a table.
 *
 * <p>The delete is queued on the shared write batch against the table's
 * column family; it is not applied until the batch is committed.
 *
 * @param table table name, resolved to a column family via {@code cf(table)}
 * @param key   key of the record to delete
 */
@Override
public void remove(String table, byte[] key) {
    this.batch.remove(cf(table), key);
}
/**
 * Stages a delete for {@code key} in {@code handle}'s column family, flushing
 * the batch when it has reached the configured capacity.
 *
 * @param handle target column family
 * @param key    key whose record is removed
 * @throws RocksDBException on a failed delete or flush
 */
public void remove(
        @Nonnull ColumnFamilyHandle handle,
        @Nonnull byte[] key) throws RocksDBException {
    batch.remove(handle, key);
    // Flush on >= capacity instead of == so a count that skips past the exact
    // capacity value cannot leave the batch growing unbounded.
    if (batch.count() >= capacity) {
        flush();
    }
}
/**
 * Adds a delete of {@code key} to the pending batch and flushes once the
 * batch is at capacity.
 *
 * @param handle column family the key lives in
 * @param key    key to remove
 * @throws RocksDBException if staging the delete or flushing fails
 */
public void remove(
        @Nonnull ColumnFamilyHandle handle,
        @Nonnull byte[] key) throws RocksDBException {
    batch.remove(handle, key);
    // Defensive >= comparison: an equality check would never fire again if the
    // batch count ever stepped over the capacity threshold.
    if (batch.count() >= capacity) {
        flush();
    }
}
/**
 * Removes the current iteration entry.
 *
 * @throws IllegalStateException when there is no current entry or it has
 *         already been removed
 */
@Override
public void remove() {
    if (currentEntry == null) {
        throw new IllegalStateException("The remove operation must be called after a valid next operation.");
    }
    if (currentEntry.deleted) {
        throw new IllegalStateException("The remove operation must be called after a valid next operation.");
    }
    currentEntry.remove();
}
/**
 * Deletes the entry most recently produced by {@code next()}.
 *
 * @throws IllegalStateException if no entry is current, or the current entry
 *         was already deleted
 */
@Override
public void remove() {
    final boolean noValidEntry = currentEntry == null || currentEntry.deleted;
    if (noValidEntry) {
        throw new IllegalStateException("The remove operation must be called after a valid next operation.");
    }
    currentEntry.remove();
}
/**
 * Deletes {@code key}, routing through the active WriteBatch when one exists,
 * otherwise deleting directly on the database.
 *
 * @param key key to delete; must not be null
 * @throws NullPointerException  if {@code key} is null
 * @throws IllegalStateException if this store has been closed
 * @throws RuntimeException      wrapping a RocksDBException from a direct delete
 */
@Override
public void remove(byte[] key) {
    // Explicit null check with a message — replaces the obscure
    // key.getClass() NPE-trigger idiom; still throws NullPointerException.
    if (key == null) {
        throw new NullPointerException("key must not be null");
    }
    Preconditions.checkState(!this.closed, "closed");
    this.cursorTracker.poll();
    if (this.writeBatch != null) {
        assert RocksDBUtil.isInitialized(this.writeBatch);
        // Batch is shared; serialize writers on it.
        synchronized (this.writeBatch) {
            this.writeBatch.remove(key);
        }
    } else {
        assert RocksDBUtil.isInitialized(this.db);
        try {
            this.db.remove(key);
        } catch (RocksDBException e) {
            throw new RuntimeException("RocksDB error", e);
        }
    }
}
/**
 * Removes the conflict records stored under {@code paths} with one synchronous
 * batched write. A no-op if no database is available for {@code txId}.
 *
 * @param txId  transaction identifier selecting the database, or null
 * @param paths conflict paths whose keys should be deleted
 */
@Override
public void removeConflicts(@Nullable String txId, Iterable<String> paths) {
    Optional<RocksDBReference> maybeDb = getDb(txId);
    if (!maybeDb.isPresent()) {
        return; // nothing to delete from
    }
    try (RocksDBReference dbRef = maybeDb.get();
         WriteOptions writeOptions = new WriteOptions();
         WriteBatch batch = new WriteBatch()) {
        writeOptions.setSync(true);
        for (String conflictPath : paths) {
            batch.remove(key(conflictPath));
        }
        dbRef.db().write(writeOptions, batch);
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Deletes the conflict entries for every path in {@code paths}, committed as a
 * single synchronous write batch. Does nothing when the transaction's database
 * cannot be obtained; RocksDB failures are propagated.
 *
 * @param txId  transaction whose database is used, or null for the default
 * @param paths paths identifying the conflict records to remove
 */
@Override
public void removeConflicts(@Nullable String txId, Iterable<String> paths) {
    Optional<RocksDBReference> dbRefOpt = getDb(txId);
    if (dbRefOpt.isPresent()) {
        try (RocksDBReference dbRef = dbRefOpt.get();
             WriteOptions writeOptions = new WriteOptions();
             WriteBatch batch = new WriteBatch()) {
            writeOptions.setSync(true);
            for (String path : paths) {
                batch.remove(key(path));
            }
            dbRef.db().write(writeOptions, batch);
        } catch (RocksDBException e) {
            propagate(e);
        }
    }
}
/**
 * Empties the store: discards the in-memory put buffer, then stages a delete
 * for every key currently in the database and commits them in one batch.
 */
@Override
public void clear() {
    putBuffer.clear();
    try (WriteBatch batch = new WriteBatch()) {
        try (RocksIterator it = db.newIterator()) {
            // Collect a delete for every existing key.
            for (it.seekToFirst(); it.isValid(); it.next()) {
                batch.remove(it.key());
            }
        }
        try (WriteOptions opts = new WriteOptions()) {
            db.write(opts, batch);
        }
    } catch (RocksDBException e) {
        propagate(e);
    }
}
/**
 * Deletes the given cell/timestamp pairs from {@code tableRef}'s column
 * family with one batched write, honoring the configured fsync policy.
 *
 * @param tableRef table whose records are deleted
 * @param keys     cells mapped to the timestamps of the versions to remove
 */
@Override
public void delete(TableReference tableRef, Multimap<Cell, Long> keys) {
    try (Disposer d = new Disposer();
         ColumnFamily table = columnFamilies.get(tableRef.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(writeOptions.fsyncPut()));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<Cell, Long> cellAndTs : keys.entries()) {
            // One physical key per (cell, timestamp) version.
            byte[] rawKey = RocksDbKeyValueServices.getKey(cellAndTs.getKey(), cellAndTs.getValue());
            batch.remove(table.getHandle(), rawKey);
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
/**
 * Removes all rows whose keys start with the current key/namespace prefix,
 * batching the deletes and writing them in one shot. Any failure is logged
 * and otherwise ignored.
 */
@Override
public void clear() {
    try (RocksIteratorWrapper iterator =
                 RocksDBKeyedStateBackend.getRocksIterator(backend.db, columnFamily);
         WriteBatch writeBatch = new WriteBatch(128)) {
        final byte[] keyPrefixBytes = serializeCurrentKeyAndNamespace();
        iterator.seek(keyPrefixBytes);
        // Keys are ordered, so the prefix range is contiguous: stop at the
        // first key that no longer matches.
        while (iterator.isValid() && startWithKeyPrefix(keyPrefixBytes, iterator.key())) {
            writeBatch.remove(columnFamily, iterator.key());
            iterator.next();
        }
        backend.db.write(writeOptions, writeBatch);
    } catch (Exception e) {
        LOG.warn("Error while cleaning the state.", e);
    }
}
@Override public void truncate() { try (RocksDBReference dbRef = dbhandle.getReference()) { try (RocksIterator it = dbRef.db().newIterator()) { it.seekToFirst(); try (WriteOptions wo = new WriteOptions(); // WriteBatch batch = new WriteBatch()) { wo.setSync(true); while (it.isValid()) { batch.remove(it.key()); it.next(); } dbRef.db().write(wo, batch); } } catch (RocksDBException e) { throw propagate(e); } } }
@Override public void truncate() { try (RocksDBReference dbRef = dbhandle.getReference()) { try (RocksIterator it = dbRef.db().newIterator()) { it.seekToFirst(); try (WriteOptions wo = new WriteOptions(); // WriteBatch batch = new WriteBatch()) { wo.setSync(true); while (it.isValid()) { batch.remove(it.key()); it.next(); } dbRef.db().write(wo, batch); } } catch (RocksDBException e) { throw new RuntimeException(e); } } }