/**
 * Convenience overload: asserts sweep progress for the default conservative shard
 * by delegating to the two-argument variant with {@code CONS_SHARD}.
 */
private void assertProgressUpdatedToTimestamp(long ts) {
    assertProgressUpdatedToTimestamp(ts, CONS_SHARD);
}
/**
 * Sweeping at the lowest possible timestamp with nothing enqueued should leave
 * recorded progress at the initial sentinel value.
 */
@Test
public void canSweepAtMinimumTimeWithNoWrites() {
    runConservativeSweepAtTimestamp(Long.MIN_VALUE);

    assertProgressUpdatedToTimestamp(SweepQueueUtils.INITIAL_TIMESTAMP);
}
/**
 * Once a sweep has advanced progress within a bucket, a subsequent sweep invoked
 * with an earlier (regressed) sweep timestamp must neither delete anything further
 * nor move the recorded progress backwards.
 */
@Test
public void doesNotGoBackwardsEvenIfSweepTimestampRegressesWithinBucket() {
    enqueueWriteCommitted(TABLE_CONS, LOW_TS);
    enqueueWriteCommitted(TABLE_CONS, LOW_TS2);
    enqueueWriteCommitted(TABLE_CONS, LOW_TS3);

    long firstSweepTs = LOW_TS2 + 5;
    runConservativeSweepAtTimestamp(firstSweepTs);
    assertReadAtTimestampReturnsSentinel(TABLE_CONS, LOW_TS2);
    assertTestValueEnqueuedAtGivenTimestampStillPresent(TABLE_CONS, LOW_TS2);
    verify(spiedKvs, times(1)).deleteAllTimestamps(any(TableReference.class), anyMap(), eq(false));
    assertProgressUpdatedToTimestamp(firstSweepTs - 1);

    // Regressed sweep timestamp: still exactly one delete overall, progress unchanged.
    runConservativeSweepAtTimestamp(LOW_TS2 - 5);
    verify(spiedKvs, times(1)).deleteAllTimestamps(any(TableReference.class), anyMap(), eq(false));
    assertProgressUpdatedToTimestamp(firstSweepTs - 1);
}
/**
 * With an empty queue, a single sweep iteration should record progress at one
 * timestamp below the conservative sweep timestamp.
 */
@Test
public void sweepProgressesToJustBeforeSweepTsWhenNothingToSweep() {
    sweepQueue.sweepNextBatch(ShardAndStrategy.conservative(CONS_SHARD));

    assertProgressUpdatedToTimestamp(getSweepTsCons() - 1L);
}
@Test public void multipleSweepersSweepDifferentShardsAndCallUnlockAfterwards() throws InterruptedException { int shards = 128; int sweepers = 8; int threads = shards / sweepers; TimelockService stickyLockService = createStickyLockService(); createAndInitializeSweepersAndWaitForOneBackgroundIteration(sweepers, shards, threads, stickyLockService); for (int i = 0; i < shards; i++) { assertProgressUpdatedToTimestamp(maxTsForFinePartition(tsPartitionFine(unreadableTs - 1)), i); verify(stickyLockService, times(1)).unlock(ImmutableSet.of(LockToken.of(new UUID(i, 0L)))); } // minimum: all threads on one host succeed, then on another, etc: // threads + threads * 2 + ... + threads * swepers verify(stickyLockService, atLeast(threads * sweepers * (sweepers - 1) / 2)) .lock(any(LockRequest.class)); // maximum: all but one succeed on each host, and only then those succeed: // shards + shards - 1 + ... + shards - (sweepers - 1) verify(stickyLockService, atMost(sweepers * shards - sweepers * (sweepers - 1) / 2)) .lock(any(LockRequest.class)); }
/**
 * When the writes fit in one batch but live several fine partitions below the
 * sweep timestamp, the first iteration progresses to the end of the written
 * partition, and the next iteration catches up to just below the sweep timestamp.
 */
@Test
public void sweepProgressesToEndOfPartitionWhenFewValuesAndSweepTsLarge() {
    long writeTs = getSweepTsCons() - 3 * TS_FINE_GRANULARITY;
    enqueueWriteCommitted(TABLE_CONS, writeTs);
    enqueueWriteCommitted(TABLE_CONS, writeTs + 5);

    // First pass stops at the boundary of the partition containing the writes.
    sweepQueue.sweepNextBatch(ShardAndStrategy.conservative(CONS_SHARD));
    assertProgressUpdatedToTimestamp(maxTsForFinePartition(tsPartitionFine(writeTs)));

    // Second pass, with nothing left to sweep, advances to just below the sweep timestamp.
    sweepQueue.sweepNextBatch(ShardAndStrategy.conservative(CONS_SHARD));
    assertProgressUpdatedToTimestamp(getSweepTsCons() - 1L);
}
/**
 * Sweeping at the lowest possible timestamp with pending writes must not remove
 * any of them, and progress stays at the initial sentinel value.
 */
@Test
public void canSweepAtMinimumTime() {
    enqueueWriteCommitted(TABLE_CONS, LOW_TS);
    enqueueWriteCommitted(TABLE_CONS, LOW_TS2);
    enqueueWriteCommitted(TABLE_CONS, LOW_TS3);

    runConservativeSweepAtTimestamp(Long.MIN_VALUE);

    assertTestValueEnqueuedAtGivenTimestampStillPresent(TABLE_CONS, LOW_TS);
    assertProgressUpdatedToTimestamp(SweepQueueUtils.INITIAL_TIMESTAMP);
}