/**
 * Truncates the unpacked key to its first {@code groupingColumns} elements and
 * returns the index subspace extended by that prefix.
 */
@Nonnull
@Override
public Subspace subspaceOf(@Nonnull byte[] keyBytes) {
    final Tuple unpacked = indexSubspace.unpack(keyBytes);
    final Tuple groupPrefix = TupleHelpers.subTuple(unpacked, 0, groupingColumns);
    return indexSubspace.subspace(groupPrefix);
}
/**
 * Subspace under which record data lives ({@code RECORD_KEY} under the store's
 * subspace). The result is computed on first use and cached in
 * {@code cachedRecordsSubspace}.
 * NOTE(review): the lazy initialization is unsynchronized — presumably a benign
 * race (recomputation yields an equal value); confirm if used across threads.
 */
@Nonnull
public Subspace recordsSubspace() {
    Subspace records = cachedRecordsSubspace;
    if (records == null) {
        records = getSubspace().subspace(Tuple.from(RECORD_KEY));
        cachedRecordsSubspace = records;
    }
    return records;
}
/** Subspace ({@code INDEX_STATE_SPACE_KEY} under the store's subspace) holding index state. */
@Nonnull
public Subspace indexStateSubspace() {
    final Tuple stateKey = Tuple.from(INDEX_STATE_SPACE_KEY);
    return getSubspace().subspace(stateKey);
}
/**
 * Resolves the reverse-directory cache entry and returns the corresponding
 * subspace under the scope's base subspace.
 */
private CompletableFuture<Subspace> getReverseCacheSubspace(LocatableResolver scope) {
    return reverseDirectoryCacheEntry.thenApply(cacheEntry -> {
        final Subspace base = scope.getBaseSubspace();
        return base.subspace(Tuple.from(cacheEntry));
    });
}
/**
 * Get the subspace in which uniqueness violations for the given index are recorded.
 * While the index is being built this tracks which values are duplicated so they
 * can be addressed later.
 * @param index the index whose uniqueness-violation subspace is wanted
 * @return the uniqueness-violation subspace for {@code index}
 */
@Nonnull
public Subspace indexUniquenessViolationsSubspace(@Nonnull Index index) {
    final Tuple violationsKey = Tuple.from(INDEX_UNIQUENESS_VIOLATIONS_KEY, index.getSubspaceKey());
    return getSubspace().subspace(violationsKey);
}
/**
 * Get the subspace in which a {@link com.apple.foundationdb.async.RangeSet RangeSet}
 * for the given index is stored. This tracks how much of the index has been built
 * when the index is built offline.
 * @param index the index whose range subspace is wanted
 * @return the subspace holding the {@link com.apple.foundationdb.async.RangeSet RangeSet}
 *         for {@code index}
 */
@Nonnull
public Subspace indexRangeSubspace(@Nonnull Index index) {
    final Tuple rangeKey = Tuple.from(INDEX_RANGE_SPACE_KEY, index.getSubspaceKey());
    return getSubspace().subspace(rangeKey);
}
/** Primary subspace for the given index's entries ({@code INDEX_KEY} + index subspace key). */
@Nonnull
public Subspace indexSubspace(@Nonnull Index index) {
    final Tuple indexKey = Tuple.from(INDEX_KEY, index.getSubspaceKey());
    return getSubspace().subspace(indexKey);
}
/** Secondary subspace for the given index ({@code INDEX_SECONDARY_SPACE_KEY} + index subspace key). */
@Nonnull
public Subspace indexSecondarySubspace(@Nonnull Index index) {
    final Tuple secondaryKey = Tuple.from(INDEX_SECONDARY_SPACE_KEY, index.getSubspaceKey());
    return getSubspace().subspace(secondaryKey);
}
/**
 * Builds the still-missing portions of the index between {@code start} and
 * {@code end} (either may be null, meaning unbounded on that side), as recorded
 * by the index's {@code RangeSet}.
 */
@Nonnull
private CompletableFuture<Void> buildRange(@Nonnull Subspace subspace,
                                           @Nullable Key.Evaluated start,
                                           @Nullable Key.Evaluated end) {
    final Subspace rangeSubspace =
            subspace.subspace(Tuple.from(FDBRecordStore.INDEX_RANGE_SPACE_KEY, index.getSubspaceKey()));
    final RangeSet rangeSet = new RangeSet(rangeSubspace);
    final byte[] startBytes = packOrNull(convertOrNull(start));
    final byte[] endBytes = packOrNull(convertOrNull(end));
    // Collect the ranges that are not yet built, then work through them.
    final Queue<Range> missing = new ArrayDeque<>();
    return rangeSet.missingRanges(runner.getDatabase().database(), startBytes, endBytes)
            .thenAccept(missing::addAll)
            .thenCompose(ignore -> buildRanges(subspace, rangeSet, missing));
}
/**
 * Returns the subspace extended by the first element of the given packed key.
 */
@Nonnull
@Override
public Subspace subspaceOf(@Nonnull byte[] keyBytes) {
    final Tuple unpacked = mapSubspace.unpack(keyBytes);
    final Tuple firstElement = TupleHelpers.subTuple(unpacked, 0, 1);
    return mapSubspace.subspace(firstElement);
}
private static void clearPreviousSplitRecord(@Nonnull final FDBRecordContext context, @Nonnull final Subspace subspace, @Nonnull final Tuple key, final boolean clearBasedOnPreviousSizeInfo, @Nullable FDBStoredSizes previousSizeInfo) { final Transaction tr = context.ensureActive(); final Subspace keySplitSubspace = subspace.subspace(key); if (clearBasedOnPreviousSizeInfo) { if (previousSizeInfo != null) { if (previousSizeInfo.isSplit() || previousSizeInfo.isVersionedInline()) { tr.clear(keySplitSubspace.range()); // Record might be shorter than previous split. } else { // Record was previously unsplit and had unsplit suffix because we are splitting long records. tr.clear(keySplitSubspace.pack(UNSPLIT_RECORD)); } } } else { tr.clear(keySplitSubspace.range()); // Clears both unsplit and previous longer split. } context.getLocalVersion(key).ifPresent(localVersion -> context.removeVersionMutation(keySplitSubspace.pack(RECORD_VERSION))); }
/**
 * Returns the subspace extended by the first element of the given packed key.
 *
 * @param keyBytes a packed key that must lie within {@code bmSubspace}
 * @return the subspace containing all keys sharing {@code keyBytes}'s first tuple element
 * @throws IllegalArgumentException if {@code keyBytes} cannot be unpacked by
 *         {@code bmSubspace}; the rethrown exception's message includes the
 *         offending key and the subspace prefix for diagnosis
 */
@Nonnull
@Override
public Subspace subspaceOf(@Nonnull byte[] keyBytes) {
    try {
        Tuple t = bmSubspace.unpack(keyBytes);
        return bmSubspace.subspace(TupleHelpers.subTuple(t, 0, 1));
    } catch (IllegalArgumentException e) {
        // Was: System.out.println debug output before rethrowing. Attach the
        // diagnostic context to the exception itself instead of printing to stdout.
        throw new IllegalArgumentException(
                "unable to unpack key: key=" + ByteArrayUtil2.loggable(keyBytes)
                        + ", subspace=" + ByteArrayUtil2.loggable(bmSubspace.getKey()), e);
    }
}
/**
 * Computes the rank of the given record's score within its group. The index key
 * is evaluated from the index's root expression; the leading
 * {@code getGroupingCount()} elements select the group's rank subspace and the
 * remainder is the score whose rank is looked up.
 */
public <M extends Message> CompletableFuture<Long> rank(@Nonnull FDBRecord<M> record) {
    final int groupPrefixSize = getGroupingCount();
    final KeyExpression rootExpression = state.index.getRootExpression();
    final Key.Evaluated evaluated = rootExpression.evaluateSingleton(record);
    Tuple score = evaluated.toTuple();
    Subspace rankSubspace = getSecondarySubspace();
    if (groupPrefixSize > 0) {
        // Split the key into (group prefix, score): the prefix narrows the subspace,
        // the remainder is ranked within that group.
        final Tuple groupPrefix = Tuple.fromList(score.getItems().subList(0, groupPrefixSize));
        rankSubspace = rankSubspace.subspace(groupPrefix);
        score = Tuple.fromList(score.getItems().subList(groupPrefixSize, score.size()));
    }
    final RankedSet rankedSet = new RankedSetIndexHelper.InstrumentedRankedSet(state, rankSubspace, nlevels);
    return RankedSetIndexHelper.rankForScore(state, rankedSet, score);
}
/**
 * Builds a versionstamped log key under {@code logSubspace} for the map at
 * {@code mapIndex}, consuming the next value of {@code localOrder} to order
 * entries written within the same transaction.
 */
private byte[] getLogKey(@Nonnull Subspace logSubspace, int mapIndex, @Nonnull AtomicInteger localOrder) {
    final Subspace mapLogSubspace = logSubspace.subspace(Tuple.from(mapIndex));
    final Versionstamp stamp = Versionstamp.incomplete(localOrder.getAndIncrement());
    return mapLogSubspace.packWithVersionstamp(Tuple.from(stamp));
}
/**
 * Scans the tokenizer-version subspace of the given text index and returns
 * (key, version) pairs for every entry. Blocks on the range read.
 */
@Nonnull
private List<Pair<Tuple, Integer>> scanTokenizerVersions(@Nonnull FDBRecordStore store, @Nonnull Index index)
        throws ExecutionException, InterruptedException {
    final Subspace versionSubspace = store.indexSecondarySubspace(index)
            .subspace(TextIndexMaintainer.TOKENIZER_VERSION_SUBSPACE_TUPLE);
    return recordStore.ensureContextActive()
            .getRange(versionSubspace.range())
            .asList()
            .get()
            .stream()
            .map(kv -> Pair.of(versionSubspace.unpack(kv.getKey()),
                               (int)Tuple.fromBytes(kv.getValue()).getLong(0)))
            .collect(Collectors.toList());
}
/**
 * Deletes the (possibly split) record at {@code key} via {@link SplitHelper} and
 * verifies that no key-values remain under the record's subspace afterwards.
 */
private void deleteSplit(@Nonnull FDBRecordContext context, @Nonnull Tuple key, boolean splitLongRecords,
                         boolean omitUnsplitSuffix, @Nullable FDBStoredSizes sizeInfo) {
    SplitHelper.deleteSplit(context, subspace, key, splitLongRecords, omitUnsplitSuffix, sizeInfo != null, sizeInfo);
    final KeyValueCursor remainingKeys = KeyValueCursor.Builder.withSubspace(subspace.subspace(key))
            .setContext(context)
            .setScanProperties(ScanProperties.FORWARD_SCAN)
            .build();
    final int remaining = remainingKeys.getCount().join();
    assertEquals(0, remaining);
}
// Shared fixture initialization: opens the database once and prepares the
// subspaces, map, keys, and value used by the tests in this class.
@BeforeAll
public static void setup() throws InterruptedException, ExecutionException {
    // API version 520 — must match the FDB client/server available in the test environment.
    FDB fdb = FDB.selectAPIVersion(520);
    fdb.setUnclosedWarning(true);
    db = fdb.open();
    // Directory-layer subspace named after the test class; created on first use (blocks on .get()).
    bmSubspace = DirectoryLayer.getDefault().createOrOpen(db, PathUtil.from(BunchedMapIterator.class.getSimpleName())).get();
    // Fifty child subspaces keyed by tuples (0L) .. (49L) under the main test subspace.
    subSubspaces = LongStream.range(0L, 50L).boxed().map(l -> bmSubspace.subspace(Tuple.from(l))).collect(Collectors.toList());
    map = new BunchedMap<>(BunchedTupleSerializer.instance(), Comparator.naturalOrder(), 10);
    // Test keys: single-element tuples (100L) .. (499L); all share a single tuple value (1066L).
    keys = LongStream.range(100L, 500L).boxed().map(Tuple::from).collect(Collectors.toList());
    value = Tuple.from(1066L);
}
/**
 * Attempts a save that is expected to fail with {@code errClazz}, and verifies that
 * the failed save had no effect: the size info remains zeroed and the keys stored
 * under the record's subspace are exactly those of the previous record (if any).
 *
 * @param context the transaction context in which to attempt the save
 * @param key the primary key to save under
 * @param serialized the serialized record bytes
 * @param version the record version to save, or null for none
 * @param splitLongRecords whether long records may be split
 * @param omitUnsplitSuffix whether the unsplit suffix is omitted
 * @param previousSizeInfo sizes of the record previously stored at {@code key}, or null if none
 * @param errClazz the exception type the save is expected to throw
 * @param errMessage a substring expected in the thrown exception's message
 * @return the (still zeroed) size info passed to the failed save
 */
private <E extends Throwable> SplitHelper.SizeInfo saveUnsuccessfully(@Nonnull FDBRecordContext context, @Nonnull Tuple key,
                                                                      byte[] serialized, @Nullable FDBRecordVersion version,
                                                                      boolean splitLongRecords, boolean omitUnsplitSuffix,
                                                                      @Nullable FDBStoredSizes previousSizeInfo,
                                                                      @Nonnull Class<E> errClazz, @Nonnull String errMessage) {
    final SplitHelper.SizeInfo sizeInfo = new SplitHelper.SizeInfo();
    E e = assertThrows(errClazz, () -> SplitHelper.saveWithSplit(context, subspace, key, serialized, version,
            splitLongRecords, omitUnsplitSuffix, previousSizeInfo != null, previousSizeInfo, sizeInfo));
    assertThat(e.getMessage(), containsString(errMessage));
    // The failed save must not have recorded any sizes.
    assertEquals(0, sizeInfo.getKeyCount());
    assertEquals(0, sizeInfo.getKeySize());
    assertEquals(0, sizeInfo.getValueSize());
    assertThat(sizeInfo.isVersionedInline(), is(false));
    int count = KeyValueCursor.Builder.withSubspace(subspace.subspace(key))
            .setContext(context)
            .setScanProperties(ScanProperties.FORWARD_SCAN)
            .build()
            .getCount()
            .join();
    // Bug fix: previously the scanned "count" was never used and the assertion compared
    // previousSizeInfo.getKeyCount() against the literal 0, which is vacuous (or wrong
    // whenever a previous record exists). The intended check is that the keys still
    // present match the previous record's key count exactly.
    assertEquals(previousSizeInfo == null ? 0 : previousSizeInfo.getKeyCount(), count);
    return sizeInfo;
}
@Nonnull private void addConvertRecordVersions(@Nonnull List<CompletableFuture<Void>> work) { if (useOldVersionFormat()) { throw new RecordCoreException("attempted to convert record versions when still using older format"); } final Subspace legacyVersionSubspace = getSubspace().subspace(Tuple.from(RECORD_VERSION_KEY)); // Read all of the keys in the old record version location. For each // record, copy its version to the new location within the primary record // subspace. Then once they are all copied, delete the old subspace. KeyValueCursor kvCursor = KeyValueCursor.Builder.withSubspace(legacyVersionSubspace) .setContext(getRecordContext()) .setScanProperties(ScanProperties.FORWARD_SCAN) .build(); CompletableFuture<Void> workFuture = kvCursor.forEach(kv -> { final Tuple primaryKey = legacyVersionSubspace.unpack(kv.getKey()); final FDBRecordVersion version = FDBRecordVersion.fromBytes(kv.getValue(), false); final byte[] newKeyBytes = getSubspace().pack(recordVersionKey(primaryKey)); final byte[] newValueBytes = SplitHelper.packVersion(version); ensureContextActive().set(newKeyBytes, newValueBytes); }).thenAccept(ignore -> ensureContextActive().clear(legacyVersionSubspace.range())); work.add(workFuture); }