private ScopedDirectoryLayer(@Nonnull FDBDatabase database, @Nullable KeySpacePath path, @Nonnull Supplier<Tuple> pathTuple) { super(database, path); if (path == null) { this.baseSubspace = new Subspace(); this.contentSubspace = new Subspace(); this.infoString = "ScopedDirectoryLayer:GLOBAL"; } else { this.baseSubspace = new Subspace(pathTuple.get()); this.contentSubspace = new Subspace(RESERVED_CONTENT_SUBSPACE_PREFIX); this.infoString = "ScopedDirectoryLayer:" + path.toString(); } this.nodeSubspace = new Subspace(Bytes.concat(baseSubspace.getKey(), DirectoryLayer.DEFAULT_NODE_SUBSPACE.getKey())); this.directoryLayer = new DirectoryLayer(nodeSubspace, contentSubspace); this.stateSubspace = nodeSubspace.get(STATE_SUBSPACE_KEY_SUFFIX); this.hashCode = Objects.hash(ScopedDirectoryLayer.class, baseSubspace, database); }
/**
 * Sums the stored counts for all entries of the given level whose keys fall in
 * {@code [beginKey, endKey)}. A {@code null} boundary means the corresponding
 * edge of the whole level.
 *
 * @param tc transaction context to read with
 * @param level skip-list level whose counts are summed
 * @param beginKey inclusive lower bound, or {@code null} for the level start
 * @param endKey exclusive upper bound, or {@code null} for the level end
 * @return future completing with the total count over the range
 */
private CompletableFuture<Long> countRange(ReadTransactionContext tc, int level,
                                           byte[] beginKey, byte[] endKey) {
    // Resolve the scan boundaries up front; null selects the level's own boundary.
    final byte[] start = (beginKey == null)
            ? subspace.range(Tuple.from(level)).begin
            : subspace.pack(Tuple.from(level, beginKey));
    final byte[] stop = (endKey == null)
            ? subspace.range(Tuple.from(level)).end
            : subspace.pack(Tuple.from(level, endKey));
    return tc.readAsync(tr ->
            AsyncUtil.mapIterable(tr.getRange(start, stop), kv -> decodeLong(kv.getValue()))
                    .asList()
                    .thenApply(counts -> counts.stream().mapToLong(Long::longValue).sum()));
}
/**
 * Renders every level of this structure as a human-readable, multi-line string
 * (one line per level, entries as {@code 'key': count} pairs). Debugging aid only.
 *
 * @param tc transaction context used to read all levels
 * @return one line per level listing each key and its count
 */
protected String toDebugString(ReadTransactionContext tc) {
    return tc.read(tr -> {
        StringBuilder str = new StringBuilder();
        for (int level = 0; level < nlevels; ++level) {
            if (level > 0) {
                // Drop the trailing ", " appended after the previous level's last entry.
                // NOTE(review): if the previous level had no entries, this removes the
                // ": " from that level's header instead — confirm that is acceptable
                // for debug output.
                str.setLength(str.length() - 2);
                str.append("\n");
            }
            str.append("L").append(level).append(": ");
            for (KeyValue kv : tr.getRange(subspace.range(Tuple.from(level)))) {
                // Tuple element 0 is the level; element 1 is the raw key bytes.
                byte[] key = subspace.unpack(kv.getKey()).getBytes(1);
                long count = decodeLong(kv.getValue());
                str.append("'").append(ByteArrayUtil2.loggable(key)).append("': ").append(count).append(", ");
            }
        }
        // NOTE(review): the final level's trailing ", " is never trimmed.
        return str.toString();
    });
}
final Subspace logSubspace = DirectoryLayer.getDefault().createOrOpen(db, PathUtil.from(getClass().getName(), "log")).get(); db.run(tr -> { tr.clear(bmSubspace.range()); tr.clear(logSubspace.range()); tr.set(logSubspace.getKey(), new byte[0]); tr.set(ByteArrayUtil.join(logSubspace.getKey(), new byte[]{(byte)0xff}), new byte[0]); return null; }); Tuple.from("PUT", key, value).pack() ); futures[i] = workerMap.put(tr, bmSubspace.subspace(Tuple.from(i)), key, value); } else { futures[i] = AsyncUtil.DONE; op = workerMap.get(tr, bmSubspace.get(mapIndex), key).thenAccept(optionalValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("GET", key, optionalValue.orElse(null)).pack()) ); op = workerMap.containsKey(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key).thenAccept(wasPresent -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("CONTAINS_KEY", key, wasPresent).pack()) ); op = workerMap.remove(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key).thenAccept(oldValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("REMOVE", key, oldValue.orElse(null)).pack()) ); Subspace mapSubspace = bmSubspace.subspace(Tuple.from(mapIndex.get()));
@Nonnull private void addConvertRecordVersions(@Nonnull List<CompletableFuture<Void>> work) { if (useOldVersionFormat()) { throw new RecordCoreException("attempted to convert record versions when still using older format"); } final Subspace legacyVersionSubspace = getSubspace().subspace(Tuple.from(RECORD_VERSION_KEY)); // Read all of the keys in the old record version location. For each // record, copy its version to the new location within the primary record // subspace. Then once they are all copied, delete the old subspace. KeyValueCursor kvCursor = KeyValueCursor.Builder.withSubspace(legacyVersionSubspace) .setContext(getRecordContext()) .setScanProperties(ScanProperties.FORWARD_SCAN) .build(); CompletableFuture<Void> workFuture = kvCursor.forEach(kv -> { final Tuple primaryKey = legacyVersionSubspace.unpack(kv.getKey()); final FDBRecordVersion version = FDBRecordVersion.fromBytes(kv.getValue(), false); final byte[] newKeyBytes = getSubspace().pack(recordVersionKey(primaryKey)); final byte[] newValueBytes = SplitHelper.packVersion(version); ensureContextActive().set(newKeyBytes, newValueBytes); }).thenAccept(ignore -> ensureContextActive().clear(legacyVersionSubspace.range())); work.add(workFuture); }
/**
 * Returns a loggable rendering of this instance's raw subspace prefix.
 */
@Override
public String toString() {
    final byte[] prefix = subspace.pack();
    return ByteArrayUtil2.loggable(prefix);
}
}
/**
 * Reads the current counter entry, if one exists.
 *
 * <p>Performs a snapshot, reverse, limit-1 range read over the counter
 * subspace, so the single row returned (when present) is the last key in
 * the subspace.
 *
 * @return future completing with the last counter key-value, or empty if none
 */
private CompletableFuture<Optional<KeyValue>> currentCounter() {
    return transaction.snapshot()
            .getRange(counterSubspace.range(), 1, true)
            .asList()
            .thenApply(entries -> entries.stream().findFirst());
}
KeySelector.firstGreaterOrEqual(primary.pack(Tuple.from(id, snapshot))), KeySelector.firstGreaterOrEqual(primary.pack(Tuple.from(id + " "))), 1, StreamingMode.EXACT, if (!primary.contains(kv.getKey())) { Tuple keyTuple = primary.unpack(kv.getKey()); String resourceId = keyTuple.getString(0); if (!id.equals(resourceId)) {
private static void clearPreviousSplitRecord(@Nonnull final FDBRecordContext context, @Nonnull final Subspace subspace, @Nonnull final Tuple key, final boolean clearBasedOnPreviousSizeInfo, @Nullable FDBStoredSizes previousSizeInfo) { final Transaction tr = context.ensureActive(); final Subspace keySplitSubspace = subspace.subspace(key); if (clearBasedOnPreviousSizeInfo) { if (previousSizeInfo != null) { if (previousSizeInfo.isSplit() || previousSizeInfo.isVersionedInline()) { tr.clear(keySplitSubspace.range()); // Record might be shorter than previous split. } else { // Record was previously unsplit and had unsplit suffix because we are splitting long records. tr.clear(keySplitSubspace.pack(UNSPLIT_RECORD)); } } } else { tr.clear(keySplitSubspace.range()); // Clears both unsplit and previous longer split. } context.getLocalVersion(key).ifPresent(localVersion -> context.removeVersionMutation(keySplitSubspace.pack(RECORD_VERSION))); }
levelSubspace = subspace.get(level); asyncIterator = lookupIterator(tr.getRange( KeySelector.firstGreaterOrEqual(levelSubspace.pack(rankKey)), KeySelector.firstGreaterThan(levelSubspace.pack(key)), ReadTransaction.ROW_LIMIT_UNLIMITED, false, rankKey = levelSubspace.unpack(kv.getKey()).getBytes(0); lastCount = decodeLong(kv.getValue()); rank += lastCount;
/**
 * Scans the tokenizer-version subspace of the given text index and returns
 * each stored (key, version) pair.
 *
 * <p>Fix: the original derived the subspace from the {@code store} parameter
 * but issued the read through the {@code recordStore} field, which only works
 * when the two happen to alias; the passed-in store is now used consistently.
 *
 * @param store the record store whose index subspace is scanned
 * @param index the text index whose tokenizer versions are read
 * @return list of (unpacked key, tokenizer version) pairs
 * @throws ExecutionException if the underlying range read fails
 * @throws InterruptedException if interrupted while waiting for the read
 */
@Nonnull
private List<Pair<Tuple, Integer>> scanTokenizerVersions(@Nonnull FDBRecordStore store, @Nonnull Index index)
        throws ExecutionException, InterruptedException {
    final Subspace tokenizerVersionSubspace =
            store.indexSecondarySubspace(index).subspace(TextIndexMaintainer.TOKENIZER_VERSION_SUBSPACE_TUPLE);
    return store.ensureContextActive().getRange(tokenizerVersionSubspace.range()).asList().get().stream()
            .map(kv -> Pair.of(tokenizerVersionSubspace.unpack(kv.getKey()),
                    (int) Tuple.fromBytes(kv.getValue()).getLong(0)))
            .collect(Collectors.toList());
}
/**
 * Converts the tuple produced for this path to a subspace.
 *
 * <p>Resolves this path within the given context via {@code toTuple} and wraps
 * the resulting tuple in a new {@code Subspace}.
 *
 * @param context the context in which to resolve the path
 * @return the subspace corresponding to the resolved path
 */
default Subspace toSubspace(FDBRecordContext context) {
    return new Subspace(toTuple(context));
}
/**
 * Returns the stored byte-array entries whose keys lie in
 * {@code [beginKey, endKey)}, in key order.
 *
 * @param tr transaction to read with
 * @param beginKey inclusive lower bound (validated by {@code checkKey})
 * @param endKey exclusive upper bound
 * @return async iterable of the raw key bytes within the range
 */
public AsyncIterable<byte[]> getRange(ReadTransaction tr, byte[] beginKey, byte[] endKey) {
    // NOTE(review): only beginKey is validated; endKey bypasses checkKey —
    // confirm that is intentional (e.g. endKey allowed to be a sentinel).
    checkKey(beginKey);
    return AsyncUtil.mapIterable(tr.getRange(subspace.pack(Tuple.from(0, beginKey)),
            subspace.pack(Tuple.from(0, endKey))), keyValue -> {
        Tuple t = subspace.unpack(keyValue.getKey());
        // Element 0 is the constant 0 discriminator; element 1 is the stored key bytes.
        return t.getBytes(1);
    });
}
/**
 * Returns the map subspace containing the given raw key, i.e. the parent
 * subspace extended by the first element of the key's tuple encoding.
 *
 * <p>Fix: the original printed diagnostics to {@code System.out} before
 * rethrowing; the same detail is now carried in the exception itself
 * (same type, original exception preserved as the cause).
 *
 * @param keyBytes a raw key that must lie within {@code bmSubspace}
 * @return the subspace for the map containing {@code keyBytes}
 * @throws IllegalArgumentException if {@code keyBytes} cannot be unpacked
 *         relative to {@code bmSubspace}
 */
@Nonnull
@Override
public Subspace subspaceOf(@Nonnull byte[] keyBytes) {
    try {
        Tuple t = bmSubspace.unpack(keyBytes);
        return bmSubspace.subspace(TupleHelpers.subTuple(t, 0, 1));
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(
                "unable to determine subspace of key " + ByteArrayUtil2.loggable(keyBytes)
                        + " within subspace " + ByteArrayUtil2.loggable(bmSubspace.getKey()), e);
    }
}
@Test public void serializeKey() { List<Tuple> sortedTuples = TEST_TUPLES.stream().sorted().collect(Collectors.toList()); List<Tuple> sortedKeys = TEST_TUPLES.stream() .map(serializer::serializeKey) .sorted(ByteArrayUtil::compareUnsigned) .map(serializer::deserializeKey) .collect(Collectors.toList()); assertEquals(sortedTuples, sortedKeys); // Add a subspace and make sure unpacking by length works. Subspace subspace = new Subspace(Tuple.from("fake", "subspace")); List<Tuple> prefixedTuples = TEST_TUPLES.stream() .map(serializer::serializeKey) .map(b -> ByteArrayUtil.join(subspace.getKey(), b)) .map(data -> serializer.deserializeKey(data, subspace.getKey().length)) .collect(Collectors.toList()); assertEquals(TEST_TUPLES, prefixedTuples); }