/** Deletes the given dedicated rows from the sweepable cells table. */
private void cleanDedicatedRows(DedicatedRows dedicatedRows) {
    sweepableCells.deleteDedicatedRows(dedicatedRows);
}
/** Deletes the non-dedicated row for the given shard/strategy and fine partition. */
private void cleanNonDedicatedRow(ShardAndStrategy shardStrategy, long partitionToDelete) {
    sweepableCells.deleteNonDedicatedRow(shardStrategy, partitionToDelete);
}
/**
 * Pushes the given writes into both underlying queue tables: the sweepable
 * timestamps index first, then the sweepable cells themselves.
 */
@Override
public void enqueue(List<WriteInfo> writes) {
    sweepableTimestamps.enqueue(writes);
    sweepableCells.enqueue(writes);
    int enqueuedCount = writes.size();
    log.debug("Enqueued {} writes into the sweep queue.", SafeArg.of("writes", enqueuedCount));
}
}
/**
 * Builds the marker cell that records, in the non-dedicated row, how many
 * dedicated rows the given batch of writes will occupy.
 */
private Map<Cell, byte[]> addReferenceToDedicatedRows(PartitionInfo info, List<WriteInfo> writes) {
    long requiredRowsEntry = entryIndicatingNumberOfRequiredRows(writes);
    return addCell(info, WriteReference.DUMMY, false, 0, requiredRowsEntry);
}
/**
 * Collects range requests covering every dedicated row referenced from the
 * non-dedicated row of the given shard/strategy and fine partition.
 */
private List<RangeRequest> rangeRequestsDedicatedRows(ShardAndStrategy shardAndStrategy, long partitionFine) {
    SweepableCellsTable.SweepableCellsRow row = computeRow(partitionFine, shardAndStrategy);
    RowColumnRangeIterator iterator = getWithColumnRangeAllForRow(row);
    List<RangeRequest> requests = new ArrayList<>();
    while (iterator.hasNext()) {
        Map.Entry<Cell, Value> entry = iterator.next();
        requests.addAll(rangeRequestsIfDedicated(row, computeColumn(entry)));
    }
    return requests;
}
/**
 * Reads the columns of the given row whose timestamps fall in the open
 * interval (minTsExclusive, maxTsExclusive), batched for the KVS.
 */
private RowColumnRangeIterator getRowColumnRange(SweepableCellsRow row, long partitionFine, long minTsExclusive, long maxTsExclusive) {
    List<byte[]> rows = ImmutableList.of(row.persistToBytes());
    // Lower bound is exclusive, hence the +1 to make it inclusive for columnsBetween.
    return getRowsColumnRange(rows, columnsBetween(minTsExclusive + 1, maxTsExclusive, partitionFine), SweepQueueUtils.BATCH_SIZE_KVS);
}
/**
 * Drains up to SWEEP_BATCH_SIZE start timestamps' worth of writes from the iterator into a
 * single batch.
 *
 * <p>Two-phase: the first loop fills the batch until the size cap is reached; the second loop
 * continues consuming only entries whose start timestamp is already present in the batch, so a
 * single transaction's writes are never split across two batches.
 *
 * <p>If an entry is found whose start timestamp is known to commit after {@code sweepTs}, the
 * batch is cut short there and returned immediately with just that write appended.
 */
private WriteBatch getBatchOfWrites(SweepableCellsRow row, PeekingIterator<Map.Entry<Cell, Value>> resultIterator, long sweepTs) {
    WriteBatch writeBatch = new WriteBatch();
    while (resultIterator.hasNext() && writeBatch.writesByStartTs.size() < SweepQueueUtils.SWEEP_BATCH_SIZE) {
        Map.Entry<Cell, Value> entry = resultIterator.next();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long startTs = getTimestamp(row, col);
        if (knownToBeCommittedAfterSweepTs(startTs, sweepTs)) {
            // Stop early: this write committed after the sweep timestamp, so nothing past it
            // in this row can be swept yet. Return the batch with only this write added.
            writeBatch.add(ImmutableList.of(getWriteInfo(startTs, entry.getValue())));
            return writeBatch;
        }
        writeBatch.merge(getWrites(row, col, entry.getValue()));
    }
    // there may be entries remaining with the same start timestamp as the last processed one. If that is the case
    // we want to include these ones as well. This is OK since there are at most MAX_CELLS_GENERIC - 1 of them.
    while (resultIterator.hasNext()) {
        // peek() first so a non-matching entry is left on the iterator for the next batch.
        Map.Entry<Cell, Value> entry = resultIterator.peek();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long timestamp = getTimestamp(row, col);
        if (writeBatch.writesByStartTs.containsKey(timestamp)) {
            writeBatch.merge(getWrites(row, col, entry.getValue()));
            resultIterator.next();
        } else {
            break;
        }
    }
    return writeBatch;
}
/**
 * Finds the next sweepable fine partition after {@code lastSweptTs} (bounded above by
 * {@code sweepTs}) and reads its batch of writes; yields an empty batch that advances the
 * swept timestamp to {@code sweepTs - 1} when no such partition exists.
 */
SweepBatch getNextBatchToSweep(ShardAndStrategy shardStrategy, long lastSweptTs, long sweepTs) {
    return sweepableTimestamps
            .nextSweepableTimestampPartition(shardStrategy, lastSweptTs, sweepTs)
            .map(partitionFine -> sweepableCells.getBatchForPartition(shardStrategy, partitionFine, lastSweptTs, sweepTs))
            .orElseGet(() -> SweepBatch.of(ImmutableList.of(), DedicatedRows.of(ImmutableList.of()), sweepTs - 1L));
}
}
/** Builds the SweepableCells under test and seeds one committed write per test table. */
@Before
public void setup() {
    super.setup();
    // Real metrics wired to a mocked timelock service; final argument is presumably a
    // refresh/window parameter of TargetedSweepMetrics.create — TODO confirm its meaning.
    metrics = TargetedSweepMetrics.create(metricsManager, mock(TimelockService.class), spiedKvs, 1);
    sweepableCells = new SweepableCells(spiedKvs, partitioner, metrics, txnService);
    // Seed a committed write in each table; the return value looks like the shard the
    // write was partitioned into — verify against writeToDefaultCellCommitted.
    shardCons = writeToDefaultCellCommitted(sweepableCells, TS, TABLE_CONS);
    shardThor = writeToDefaultCellCommitted(sweepableCells, TS2, TABLE_THOR);
}
/**
 * When the batch is large enough to need dedicated rows, produces the marker cell
 * referencing them; otherwise no reference cells are needed.
 */
@Override
Map<Cell, byte[]> populateReferences(PartitionInfo partitionInfo, List<WriteInfo> writes) {
    return writes.size() > SweepQueueUtils.MAX_CELLS_GENERIC
            ? addReferenceToDedicatedRows(partitionInfo, writes)
            : ImmutableMap.of();
}
/**
 * Materializes one cell per write; batches larger than MAX_CELLS_GENERIC go into
 * dedicated rows, indexed by their position in the batch.
 */
@Override
Map<Cell, byte[]> populateCells(PartitionInfo partitionInfo, List<WriteInfo> writes) {
    boolean dedicate = writes.size() > SweepQueueUtils.MAX_CELLS_GENERIC;
    Map<Cell, byte[]> cells = new HashMap<>();
    for (int index = 0; index < writes.size(); index++) {
        cells.putAll(addWrite(partitionInfo, writes.get(index), dedicate, index));
    }
    return cells;
}
/**
 * Builds the cell for a single write; when dedicated rows are in use, the batch index
 * determines which dedicated row the write lands in and its offset within that row.
 */
private Map<Cell, byte[]> addWrite(PartitionInfo info, WriteInfo write, boolean dedicate, long index) {
    long dedicatedRow = index / SweepQueueUtils.MAX_CELLS_DEDICATED;
    long positionInRow = index % SweepQueueUtils.MAX_CELLS_DEDICATED;
    return addCell(info, write.writeRef(), dedicate, dedicatedRow, positionInRow);
}
/**
 * Drains up to SWEEP_BATCH_SIZE start timestamps' worth of writes from the iterator into a
 * single batch.
 *
 * <p>Two-phase: the first loop fills the batch until the size cap is reached; the second loop
 * continues consuming only entries whose start timestamp is already present in the batch, so a
 * single transaction's writes are never split across two batches.
 *
 * <p>If an entry is found whose start timestamp is known to commit after {@code sweepTs}, the
 * batch is cut short there and returned immediately with just that write appended.
 */
private WriteBatch getBatchOfWrites(SweepableCellsRow row, PeekingIterator<Map.Entry<Cell, Value>> resultIterator, long sweepTs) {
    WriteBatch writeBatch = new WriteBatch();
    while (resultIterator.hasNext() && writeBatch.writesByStartTs.size() < SweepQueueUtils.SWEEP_BATCH_SIZE) {
        Map.Entry<Cell, Value> entry = resultIterator.next();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long startTs = getTimestamp(row, col);
        if (knownToBeCommittedAfterSweepTs(startTs, sweepTs)) {
            // Stop early: this write committed after the sweep timestamp, so nothing past it
            // in this row can be swept yet. Return the batch with only this write added.
            writeBatch.add(ImmutableList.of(getWriteInfo(startTs, entry.getValue())));
            return writeBatch;
        }
        writeBatch.merge(getWrites(row, col, entry.getValue()));
    }
    // there may be entries remaining with the same start timestamp as the last processed one. If that is the case
    // we want to include these ones as well. This is OK since there are at most MAX_CELLS_GENERIC - 1 of them.
    while (resultIterator.hasNext()) {
        // peek() first so a non-matching entry is left on the iterator for the next batch.
        Map.Entry<Cell, Value> entry = resultIterator.peek();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long timestamp = getTimestamp(row, col);
        if (writeBatch.writesByStartTs.containsKey(timestamp)) {
            writeBatch.merge(getWrites(row, col, entry.getValue()));
            resultIterator.next();
        } else {
            break;
        }
    }
    return writeBatch;
}
/**
 * Collects range requests covering every dedicated row referenced from the
 * non-dedicated row of the given shard/strategy and fine partition.
 */
private List<RangeRequest> rangeRequestsDedicatedRows(ShardAndStrategy shardAndStrategy, long partitionFine) {
    SweepableCellsTable.SweepableCellsRow indexRow = computeRow(partitionFine, shardAndStrategy);
    List<RangeRequest> requests = new ArrayList<>();
    RowColumnRangeIterator entries = getWithColumnRangeAllForRow(indexRow);
    while (entries.hasNext()) {
        SweepableCellsTable.SweepableCellsColumn column = computeColumn(entries.next());
        requests.addAll(rangeRequestsIfDedicated(indexRow, column));
    }
    return requests;
}
/** Reads a sweep batch for the thorough-strategy shard used by these tests. */
private SweepBatch readThorough(long partition, long minExclusive, long maxExclusive) {
    ShardAndStrategy shardStrategy = thorough(shardThor);
    return sweepableCells.getBatchForPartition(shardStrategy, partition, minExclusive, maxExclusive);
}
/** Wires up an uninitialized targeted sweeper plus direct handles on its backing stores. */
@Before
public void setup() {
    super.setup();
    sweepQueue = TargetedSweeper.createUninitializedForTest(metricsManager, () -> enabled, () -> DEFAULT_SHARDS);
    mockFollower = mock(TargetedSweepFollower.class);
    timelockService = mock(TimelockService.class);
    sweepQueue.initializeWithoutRunning(timestampsSupplier, timelockService, spiedKvs, txnService, mockFollower);
    // Direct handles onto the same underlying tables, so tests can inspect/seed state
    // without going through the sweeper.
    progress = new ShardProgress(spiedKvs);
    sweepableTimestamps = new SweepableTimestamps(spiedKvs, partitioner);
    // NOTE(review): metrics argument is null here — presumably the code paths these tests
    // exercise never touch metrics; confirm SweepableCells tolerates a null metrics param.
    sweepableCells = new SweepableCells(spiedKvs, partitioner, null, txnService);
    puncherStore = KeyValueServicePuncherStore.create(spiedKvs, false);
}
/**
 * Reads the columns of the given row whose timestamps fall in the open
 * interval (minTsExclusive, maxTsExclusive), batched for the KVS.
 */
private RowColumnRangeIterator getRowColumnRange(SweepableCellsRow row, long partitionFine, long minTsExclusive, long maxTsExclusive) {
    // Convert the exclusive lower bound to the inclusive bound columnsBetween expects.
    long minTsInclusive = minTsExclusive + 1;
    return getRowsColumnRange(
            ImmutableList.of(row.persistToBytes()),
            columnsBetween(minTsInclusive, maxTsExclusive, partitionFine),
            SweepQueueUtils.BATCH_SIZE_KVS);
}
/**
 * Builds the marker cell that records, in the non-dedicated row, how many
 * dedicated rows the given batch of writes will occupy.
 */
private Map<Cell, byte[]> addReferenceToDedicatedRows(PartitionInfo info, List<WriteInfo> writes) {
    // DUMMY write reference: this cell is bookkeeping, not a real write.
    return addCell(info, WriteReference.DUMMY, false, 0, entryIndicatingNumberOfRequiredRows(writes));
}
/**
 * When the batch is large enough to need dedicated rows, produces the marker cell
 * referencing them; otherwise no reference cells are needed.
 */
@Override
Map<Cell, byte[]> populateReferences(PartitionInfo partitionInfo, List<WriteInfo> writes) {
    if (writes.size() <= SweepQueueUtils.MAX_CELLS_GENERIC) {
        return ImmutableMap.of();
    }
    return addReferenceToDedicatedRows(partitionInfo, writes);
}
/**
 * Materializes one cell per write; batches larger than MAX_CELLS_GENERIC go into
 * dedicated rows, indexed by their position in the batch.
 */
@Override
Map<Cell, byte[]> populateCells(PartitionInfo partitionInfo, List<WriteInfo> writes) {
    Map<Cell, byte[]> result = new HashMap<>();
    boolean useDedicatedRows = writes.size() > SweepQueueUtils.MAX_CELLS_GENERIC;
    long position = 0;
    for (WriteInfo writeInfo : writes) {
        result.putAll(addWrite(partitionInfo, writeInfo, useDedicatedRows, position));
        position++;
    }
    return result;
}