/**
 * Returns the next batch of writes to sweep for the given shard and strategy: the batch for the
 * lowest fine partition containing sweepable timestamps in the open range (lastSweptTs, sweepTs).
 *
 * @param shardStrategy shard and strategy to get the next batch for
 * @param lastSweptTs exclusive lower bound for timestamps of entries in the batch
 * @param sweepTs exclusive upper bound for timestamps of entries in the batch
 * @return the batch for the next sweepable partition, or an empty batch whose last timestamp is
 *         sweepTs - 1 when no partition is sweepable
 */
SweepBatch getNextBatchToSweep(ShardAndStrategy shardStrategy, long lastSweptTs, long sweepTs) {
    return sweepableTimestamps
            .nextSweepableTimestampPartition(shardStrategy, lastSweptTs, sweepTs)
            .map(fine -> sweepableCells.getBatchForPartition(shardStrategy, fine, lastSweptTs, sweepTs))
            // orElseGet: only construct the empty fallback batch when it is actually needed,
            // instead of eagerly on every call as orElse did
            .orElseGet(() ->
                    SweepBatch.of(ImmutableList.of(), DedicatedRows.of(ImmutableList.of()), sweepTs - 1L));
}
}
/**
 * Builds the single cell-to-value entry to write into the SweepableTimestamps table for this
 * partition of writes. The persisted column value payload is the DUMMY constant.
 */
@Override
Map<Cell, byte[]> populateCells(PartitionInfo info, List<WriteInfo> writes) {
    SweepableTimestampsTable.SweepableTimestampsRow tableRow = computeRow(info);
    SweepableTimestampsTable.SweepableTimestampsColumn tableColumn = computeColumn(info);
    SweepableTimestampsTable.SweepableTimestampsColumnValue columnValue =
            SweepableTimestampsTable.SweepableTimestampsColumnValue.of(tableColumn, DUMMY);
    Cell targetCell = SweepQueueUtils.toCell(tableRow, columnValue);
    return ImmutableMap.of(targetCell, columnValue.persistValue());
}
/**
 * Looks up the first fine partition recorded in the row for the given coarse partition,
 * restricted to the given column range.
 *
 * @return the fine partition decoded from the first matching column, or empty if the row has no
 *         columns in the range
 */
private Optional<Long> getCandidatesInCoarsePartition(
        ShardAndStrategy shardStrategy, long partitionCoarse, ColumnRangeSelection colRange) {
    byte[] row = computeRowBytes(shardStrategy, partitionCoarse);
    // batch hint of 1: only the first matching column is needed
    RowColumnRangeIterator columns = getRowsColumnRange(ImmutableList.of(row), colRange, 1);
    if (columns.hasNext()) {
        Map.Entry<Cell, Value> firstColumn = columns.next();
        return Optional.of(getFinePartitionFromEntry(firstColumn));
    }
    return Optional.empty();
}
/**
 * Scans coarse partitions in increasing order and returns the first fine partition found in the
 * inclusive range [minFineInclusive, maxFineInclusive], or empty if none exists.
 */
private Optional<Long> nextSweepablePartition(
        ShardAndStrategy shardAndStrategy, long minFineInclusive, long maxFineInclusive) {
    ColumnRangeSelection columnRange = getColRangeSelection(minFineInclusive, maxFineInclusive + 1);
    long firstCoarse = SweepQueueUtils.partitionFineToCoarse(minFineInclusive);
    long lastCoarse = SweepQueueUtils.partitionFineToCoarse(maxFineInclusive);
    for (long coarse = firstCoarse; coarse <= lastCoarse; coarse++) {
        Optional<Long> fine = getCandidatesInCoarsePartition(shardAndStrategy, coarse, columnRange);
        if (fine.isPresent()) {
            return fine;
        }
    }
    return Optional.empty();
}
/**
 * Wires up an uninitialized targeted sweeper against the spied KVS, together with the helper
 * stores the tests read directly (shard progress, sweepable timestamps/cells, puncher store).
 */
@Before
public void setup() {
    super.setup();
    sweepQueue = TargetedSweeper.createUninitializedForTest(metricsManager, () -> enabled, () -> DEFAULT_SHARDS);
    mockFollower = mock(TargetedSweepFollower.class);
    timelockService = mock(TimelockService.class);
    // initializeWithoutRunning: per the method name, sets up components without starting
    // background sweeping — TODO confirm against TargetedSweeper
    sweepQueue.initializeWithoutRunning(timestampsSupplier, timelockService, spiedKvs, txnService, mockFollower);
    progress = new ShardProgress(spiedKvs);
    sweepableTimestamps = new SweepableTimestamps(spiedKvs, partitioner);
    // null metrics argument — presumably acceptable for SweepableCells in tests; verify
    sweepableCells = new SweepableCells(spiedKvs, partitioner, null, txnService);
    puncherStore = KeyValueServicePuncherStore.create(spiedKvs, false);
}
/**
 * Deletes the entire row of the Sweepable Timestamps table for the given shard/strategy and
 * coarse partition.
 *
 * @param shardStrategy desired shard and strategy
 * @param partitionCoarse coarse partition for which the row should be deleted
 */
void deleteRow(ShardAndStrategy shardStrategy, long partitionCoarse) {
    byte[] row = computeRowBytes(shardStrategy, partitionCoarse);
    // a range covering exactly this one row: [row, lexicographic successor of row)
    RangeRequest singleRowRange = RangeRequest.builder()
            .startRowInclusive(row)
            .endRowExclusive(RangeRequests.nextLexicographicName(row))
            .retainColumns(ColumnSelection.all())
            .build();
    deleteRange(singleRowRange);
}
}
/**
 * Enqueues the given writes into both sweep queue tables: first the sweepable timestamps
 * index, then the per-cell entries.
 */
@Override
public void enqueue(List<WriteInfo> writes) {
    sweepableTimestamps.enqueue(writes);
    sweepableCells.enqueue(writes);
    int enqueuedCount = writes.size();
    log.debug("Enqueued {} writes into the sweep queue.", SafeArg.of("writes", enqueuedCount));
}
}
/**
 * Deletes the SweepableTimestamps row for the previously swept coarse partition once sweep
 * progress has moved past it entirely.
 */
private void cleanSweepableTimestamps(ShardAndStrategy shardStrategy, long oldProgress, long newProgress) {
    // skip cleanup on the very first iteration (no prior progress to clean up after)
    if (firstIterationOfSweep(oldProgress)) {
        return;
    }
    long lastSweptPartitionPreviously = SweepQueueUtils.tsPartitionCoarse(oldProgress);
    // newProgress + 1 is the lowest timestamp the next iteration may sweep
    long minimumSweepPartitionNextIteration = SweepQueueUtils.tsPartitionCoarse(newProgress + 1);
    // only delete once the next iteration can no longer touch the previous coarse partition
    if (minimumSweepPartitionNextIteration > lastSweptPartitionPreviously) {
        sweepableTimestamps.deleteRow(shardStrategy, lastSweptPartitionPreviously);
        log.info("Deleted persisted sweep queue information in table {} for partition {}.",
                // null argument — presumably an unused transaction handle, only the table ref
                // is needed here; TODO confirm
                LoggingArgs.tableRef(TargetedSweepTableFactory.of()
                        .getSweepableTimestampsTable(null).getTableRef()),
                SafeArg.of("partition", lastSweptPartitionPreviously));
    }
}
/**
 * Creates a fully wired SweepQueueFactory, ensuring the targeted sweep schema exists in the KVS
 * and hooking the shards supplier up to persisted shard progress.
 */
static SweepQueueFactory create(
        TargetedSweepMetrics metrics,
        KeyValueService kvs,
        TimelockService timelock,
        Supplier<Integer> shardsConfig,
        TransactionService transaction) {
    Schemas.createTablesAndIndexes(TargetedSweepSchema.INSTANCE.getLatestSchema(), kvs);
    ShardProgress progress = new ShardProgress(kvs);
    Supplier<Integer> shardsSupplier =
            createProgressUpdatingSupplier(shardsConfig, progress, SweepQueueUtils.REFRESH_TIME);
    WriteInfoPartitioner writePartitioner = new WriteInfoPartitioner(kvs, shardsSupplier);
    SweepableCells sweepableCells = new SweepableCells(kvs, writePartitioner, metrics, transaction);
    SweepableTimestamps sweepableTimestamps = new SweepableTimestamps(kvs, writePartitioner);
    return new SweepQueueFactory(
            progress, shardsSupplier, sweepableCells, sweepableTimestamps, metrics, kvs, timelock);
}
/**
 * Walks coarse partitions from lowest to highest and returns the first fine partition found in
 * the inclusive range [minFineInclusive, maxFineInclusive], or empty if no coarse partition
 * yields a candidate.
 */
private Optional<Long> nextSweepablePartition(
        ShardAndStrategy shardAndStrategy, long minFineInclusive, long maxFineInclusive) {
    ColumnRangeSelection columnRange = getColRangeSelection(minFineInclusive, maxFineInclusive + 1);
    long firstCoarse = SweepQueueUtils.partitionFineToCoarse(minFineInclusive);
    long lastCoarse = SweepQueueUtils.partitionFineToCoarse(maxFineInclusive);
    for (long coarse = firstCoarse; coarse <= lastCoarse; coarse++) {
        Optional<Long> candidate = getCandidatesInCoarsePartition(shardAndStrategy, coarse, columnRange);
        if (candidate.isPresent()) {
            return candidate;
        }
    }
    return Optional.empty();
}
/**
 * Deletes the entire row of the Sweepable Timestamps table for the given shard/strategy and
 * coarse partition.
 *
 * @param shardStrategy desired shard and strategy
 * @param partitionCoarse coarse partition for which the row should be deleted
 */
void deleteRow(ShardAndStrategy shardStrategy, long partitionCoarse) {
    byte[] row = computeRowBytes(shardStrategy, partitionCoarse);
    // delete precisely the span [row, successor of row), i.e. this single row
    RangeRequest singleRowRequest = RangeRequest.builder()
            .startRowInclusive(row)
            .endRowExclusive(RangeRequests.nextLexicographicName(row))
            .retainColumns(ColumnSelection.all())
            .build();
    deleteRange(singleRowRequest);
}
}
/**
 * Enqueues the given writes into the sweep queue: the sweepable timestamps index is updated
 * before the per-cell entries.
 */
@Override
public void enqueue(List<WriteInfo> writes) {
    sweepableTimestamps.enqueue(writes);
    sweepableCells.enqueue(writes);
    int writeCount = writes.size();
    log.debug("Enqueued {} writes into the sweep queue.", SafeArg.of("writes", writeCount));
}
}
/**
 * Deletes the SweepableTimestamps row for the coarse partition that was last swept previously,
 * once progress has advanced far enough that the next iteration cannot touch it.
 */
private void cleanSweepableTimestamps(ShardAndStrategy shardStrategy, long oldProgress, long newProgress) {
    // first iteration: no prior progress, so there is nothing to clean
    if (firstIterationOfSweep(oldProgress)) {
        return;
    }
    long lastSweptPartitionPreviously = SweepQueueUtils.tsPartitionCoarse(oldProgress);
    // newProgress + 1 is the lowest timestamp the next sweep iteration may process
    long minimumSweepPartitionNextIteration = SweepQueueUtils.tsPartitionCoarse(newProgress + 1);
    // delete only when the previous coarse partition is strictly behind all future sweeping
    if (minimumSweepPartitionNextIteration > lastSweptPartitionPreviously) {
        sweepableTimestamps.deleteRow(shardStrategy, lastSweptPartitionPreviously);
        log.info("Deleted persisted sweep queue information in table {} for partition {}.",
                // null argument — presumably an unused transaction handle, only the table ref
                // is needed; TODO confirm
                LoggingArgs.tableRef(TargetedSweepTableFactory.of()
                        .getSweepableTimestampsTable(null).getTableRef()),
                SafeArg.of("partition", lastSweptPartitionPreviously));
    }
}
/**
 * Returns the fine partition decoded from the first column (within the given range) of the row
 * for the given coarse partition, or empty when the row has no such column.
 */
private Optional<Long> getCandidatesInCoarsePartition(
        ShardAndStrategy shardStrategy, long partitionCoarse, ColumnRangeSelection colRange) {
    byte[] rowName = computeRowBytes(shardStrategy, partitionCoarse);
    // batch hint of 1: only the first matching column matters
    RowColumnRangeIterator columnIterator = getRowsColumnRange(ImmutableList.of(rowName), colRange, 1);
    if (columnIterator.hasNext()) {
        Map.Entry<Cell, Value> entry = columnIterator.next();
        return Optional.of(getFinePartitionFromEntry(entry));
    }
    return Optional.empty();
}
/** Asserts no sweepable timestamp partition exists below the conservative sweep timestamp. */
private void assertNoEntriesInSweepableTimestampsBeforeSweepTimestamp() {
    Optional<Long> partition = sweepableTimestamps.nextSweepableTimestampPartition(
            ShardAndStrategy.conservative(CONS_SHARD), -1L, getSweepTsCons());
    assertThat(partition).isEmpty();
}
/**
 * Sets up the shard progress and sweepable timestamps stores, then seeds one committed write
 * per table, recording the resulting shard numbers.
 */
@Before
@Override
public void setup() {
    super.setup();
    progress = new ShardProgress(spiedKvs);
    sweepableTimestamps = new SweepableTimestamps(spiedKvs, partitioner);
    // presumably the shard each committed write was routed to — TODO confirm against
    // writeToDefaultCellCommitted
    shardCons = writeToDefaultCellCommitted(sweepableTimestamps, TS, TABLE_CONS);
    shardThor = writeToDefaultCellCommitted(sweepableTimestamps, TS2, TABLE_THOR);
}
/**
 * Produces the single cell-value entry written into the SweepableTimestamps table for this
 * partition; the persisted payload is the DUMMY constant.
 */
@Override
Map<Cell, byte[]> populateCells(PartitionInfo info, List<WriteInfo> writes) {
    SweepableTimestampsTable.SweepableTimestampsRow rowKey = computeRow(info);
    SweepableTimestampsTable.SweepableTimestampsColumn column = computeColumn(info);
    SweepableTimestampsTable.SweepableTimestampsColumnValue value =
            SweepableTimestampsTable.SweepableTimestampsColumnValue.of(column, DUMMY);
    return ImmutableMap.of(SweepQueueUtils.toCell(rowKey, value), value.persistValue());
}
/** Asserts the lowest sweepable fine partition for the conservative shard equals the given one. */
private void assertLowestFinePartitionInSweepableTimestampsEquals(long partitionFine) {
    Optional<Long> partition = sweepableTimestamps.nextSweepableTimestampPartition(
            ShardAndStrategy.conservative(CONS_SHARD), -1L, getSweepTsCons());
    assertThat(partition).contains(partitionFine);
}
/**
 * Builds a SweepQueueFactory with all its collaborators, creating the targeted sweep tables in
 * the KVS and tying the shards supplier to persisted shard progress.
 */
static SweepQueueFactory create(
        TargetedSweepMetrics metrics,
        KeyValueService kvs,
        TimelockService timelock,
        Supplier<Integer> shardsConfig,
        TransactionService transaction) {
    Schemas.createTablesAndIndexes(TargetedSweepSchema.INSTANCE.getLatestSchema(), kvs);
    ShardProgress persistedProgress = new ShardProgress(kvs);
    Supplier<Integer> refreshedShards =
            createProgressUpdatingSupplier(shardsConfig, persistedProgress, SweepQueueUtils.REFRESH_TIME);
    WriteInfoPartitioner infoPartitioner = new WriteInfoPartitioner(kvs, refreshedShards);
    SweepableCells cellsQueue = new SweepableCells(kvs, infoPartitioner, metrics, transaction);
    SweepableTimestamps timestampsQueue = new SweepableTimestamps(kvs, infoPartitioner);
    return new SweepQueueFactory(
            persistedProgress, refreshedShards, cellsQueue, timestampsQueue, metrics, kvs, timelock);
}
/**
 * Reads the next sweepable fine partition for the given conservative shard, starting from the
 * persisted last swept timestamp and bounded above by the conservative sweep timestamp.
 */
private Optional<Long> readConservative(int shardNumber) {
    long lastSwept = progress.getLastSweptTimestamp(ShardAndStrategy.conservative(shardNumber));
    long sweepTs = Sweeper.CONSERVATIVE.getSweepTimestamp(timestampsSupplier);
    return sweepableTimestamps.nextSweepableTimestampPartition(conservative(shardNumber), lastSwept, sweepTs);
}