/**
 * Convenience method that returns the {@link #getTimeIncreasing()} value
 * as a {@link Date} instance.
 *
 * @return strictly increasing time
 * @throws InterruptedException if the call to {@link #getTimeIncreasing()}
 *         is interrupted while waiting for the clock to advance
 */
public Date getDateIncreasing() throws InterruptedException {
    return new Date(getTimeIncreasing());
}
/**
 * Waits until the given point in time is reached. The current thread
 * is suspended until the {@link #getTimeIncreasing()} method returns
 * a time that's equal or greater than the given point in time.
 *
 * @param timestamp time in milliseconds since epoch
 * @throws InterruptedException if the wait was interrupted
 */
public void waitUntil(long timestamp) throws InterruptedException {
    // Re-read the clock after every sleep; the loop exits as soon as the
    // increasing clock has caught up with the requested point in time.
    for (long now = getTimeIncreasing(); now < timestamp; now = getTimeIncreasing()) {
        Thread.sleep(timestamp - now);
    }
}
/**
 * Waits until the given point in time is reached. The current thread
 * is suspended until the {@link #getTimeIncreasing()} method returns
 * a time that's equal or greater than the given point in time.
 *
 * @param timestamp time in milliseconds since epoch
 * @throws InterruptedException if the wait was interrupted
 */
public void waitUntil(long timestamp) throws InterruptedException {
    long current = getTimeIncreasing();
    while (current < timestamp) {
        // Sleep for the remaining gap, then re-check the clock: the sleep
        // may return early relative to this clock's notion of time.
        long remaining = timestamp - current;
        Thread.sleep(remaining);
        current = getTimeIncreasing();
    }
}
/**
 * Convenience method that returns the {@link #getTimeIncreasing()} value
 * as a {@link Date} instance.
 *
 * @return strictly increasing time
 * @throws InterruptedException if the call to {@link #getTimeIncreasing()}
 *         is interrupted while waiting for the clock to advance
 */
public Date getDateIncreasing() throws InterruptedException {
    return new Date(getTimeIncreasing());
}
// Returns the current time, preferring the strictly increasing clock.
private static long getTime() {
    try {
        return Clock.SIMPLE.getTimeIncreasing();
    } catch (InterruptedException ie) {
        // Restore the interrupt status so callers can still observe it,
        // then fall back to the monotonic clock instead of failing.
        Thread.currentThread().interrupt();
        return Clock.SIMPLE.getTimeMonotonic();
    }
}
}
// Reads the strictly increasing clock; never propagates interruption.
private static long getTime() {
    try {
        return Clock.SIMPLE.getTimeIncreasing();
    } catch (InterruptedException interrupted) {
        // Re-set the thread's interrupt flag (the exception cleared it)
        // and degrade to the monotonic clock as a non-blocking fallback.
        Thread.currentThread().interrupt();
        return Clock.SIMPLE.getTimeMonotonic();
    }
}
}
// Verifies that ten consecutive reads of the clock are strictly increasing.
private void testClockIncreasing(Clock clock) throws InterruptedException {
    long previous = 0;
    for (int attempt = 0; attempt < 10; attempt++) {
        long current = clock.getTimeIncreasing();
        assertTrue(previous < current);
        previous = current;
    }
}
// Creates a unique (clock-stamped) test directory path under "target",
// removing any leftover from a previous run.
protected static File createTestDirectory() throws InterruptedException {
    String dirName = "upgrade-" + Clock.SIMPLE.getTimeIncreasing();
    File dir = new File("target", dirName);
    // Best-effort cleanup of stale content; failures are ignored.
    FileUtils.deleteQuietly(dir);
    return dir;
}
// Returns the unique id stored on the index definition's status node,
// generating and persisting a fresh clock-based id when none exists yet.
public static String configureUniqueId(NodeBuilder definition) {
    NodeBuilder status = definition.child(IndexDefinition.STATUS_NODE);
    String existing = status.getString(IndexDefinition.PROP_UID);
    if (existing != null) {
        return existing;
    }
    String fresh;
    try {
        fresh = String.valueOf(Clock.SIMPLE.getTimeIncreasing());
    } catch (InterruptedException e) {
        // Preserve the interrupt status and fall back to the plain clock.
        Thread.currentThread().interrupt();
        fresh = String.valueOf(Clock.SIMPLE.getTime());
    }
    status.setProperty(IndexDefinition.PROP_UID, fresh);
    return fresh;
}
private void updateLastModified(String name) { try { // Update file timestamp manually to mimic last updated time updates without sleeping CLOCK.waitUntil(CLOCK.getTime() + TimeUnit.SECONDS.toMillis(2)); File f = new File(directory, name); f.setLastModified(CLOCK.getTimeIncreasing()); } catch (InterruptedException ie) { // ignored } }
@Test
public void cancellablePurge() throws Exception {
    // Register ten committed deletions, "Blob0".."Blob9"; each blob reports
    // a single chunk id equal to its blob id.
    BlobDeletionCallback bdc = adbc.getBlobDeletionCallback();
    for (int i = 0; i < 10; i++) {
        String id = "Blob" + i;
        bdc.deleted(id, Collections.singleton(id));
    }
    bdc.commitProgress(COMMIT_SUCCEDED);
    // Gate the blob store's delete callback on a semaphore so the purge
    // thread can be paused mid-way. Presumably the callback fires once per
    // chunk deletion — TODO confirm against the test blob store.
    Semaphore purgeBlocker = new Semaphore(0);
    blobStore.callback = () -> purgeBlocker.acquireUninterruptibly();
    // Run the purge on a daemon thread; it blocks whenever no permit is free.
    Thread purgeThread = new Thread(() -> {
        try {
            adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    });
    purgeThread.setDaemon(true);
    // NOTE(review): original comment said "allow 5 deletes" but 10 permits
    // are released and 10 deletions are then awaited — the comment looks
    // stale; the permit-to-delete ratio should be verified.
    purgeBlocker.release(10);
    purgeThread.start();
    boolean deleted5 = waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 10);
    assertTrue("Deleted " + blobStore.deletedChunkIds.size() + " chunks", deleted5);
    // Cancel the collection, then release more permits than could ever be
    // consumed so the purge thread cannot stay blocked on the semaphore.
    adbc.cancelBlobCollection();
    purgeBlocker.release(20);
    // After cancellation only a couple more chunks are expected to slip
    // through (up to 12 total) before the purge stops.
    boolean deleted6 = waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 12);
    assertTrue("Haven't deleted another blob which was locked earlier.", deleted6);
    // NOTE(review): the message below says "2 seconds" but the wait is
    // 5000 ms — message and timeout are out of sync.
    boolean cancelWorked = waitFor(5000, () -> !purgeThread.isAlive());
    assertTrue("Cancel didn't let go of purge thread in 2 seconds", cancelWorked);
    assertTrue("Cancelling purge must return asap", blobStore.deletedChunkIds.size() == 12);
}
@Test
public void simpleCase() throws Exception {
    // A single committed deletion must be purged.
    BlobDeletionCallback callback = adbc.getBlobDeletionCallback();
    callback.deleted("blobId", Collections.singleton("/a"));
    callback.commitProgress(COMMIT_SUCCEDED);
    adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
    verifyBlobsDeleted("blobId");
}
@Test
public void noopDoesNothing() throws Exception {
    // Swap in the NOOP collector: reported deletions must be discarded.
    adbc = ActiveDeletedBlobCollectorFactory.NOOP;
    BlobDeletionCallback callback = adbc.getBlobDeletionCallback();
    callback.deleted("blobId", Collections.singleton("/a"));
    callback.commitProgress(COMMIT_SUCCEDED);
    adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
    // Nothing may have been purged.
    verifyBlobsDeleted();
}
@Test public void uncommittedDeletionsMustNotBePurged() throws Exception { BlobDeletionCallback bdc1 = adbc.getBlobDeletionCallback(); bdc1.deleted("blobId1", Collections.singleton("/a")); bdc1.commitProgress(COMMIT_FAILED); BlobDeletionCallback bdc2 = adbc.getBlobDeletionCallback(); bdc2.deleted("blobId2", Collections.singleton("/b")); bdc2.commitProgress(COMMIT_SUCCEDED); adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore); //blobId2 is committed later verifyBlobsDeleted("blobId2"); }
@Test
public void blobTimestampMustBeBiggerThanFileTimestamp() throws Exception {
    // blobId1 and blobId3 are committed before the purge cut-off time is
    // taken; blobId2 is only committed after the clock has been advanced
    // past the cut-off.
    BlobDeletionCallback bdc1 = adbc.getBlobDeletionCallback();
    bdc1.deleted("blobId1", Collections.singleton("/a"));
    bdc1.commitProgress(COMMIT_SUCCEDED);
    BlobDeletionCallback bdc2 = adbc.getBlobDeletionCallback();
    bdc2.deleted("blobId2", Collections.singleton("/b"));
    BlobDeletionCallback bdc3 = adbc.getBlobDeletionCallback();
    bdc3.deleted("blobId3", Collections.singleton("/c"));
    bdc3.commitProgress(COMMIT_SUCCEDED);
    // Cut-off timestamp for the purge below.
    long time = clock.getTimeIncreasing();
    // Advance the clock a minute so bdc2's commit lands after the cut-off.
    clock.waitUntil(clock.getTime() + TimeUnit.MINUTES.toMillis(1));
    bdc2.commitProgress(COMMIT_SUCCEDED);
    adbc.purgeBlobsDeleted(time, blobStore);
    //blobId2 is committed later
    verifyBlobsDeleted("blobId1", "blobId3");
}
// First purge pass at the current (strictly increasing) clock value.
adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
// Drop chunk ids that were deleted out-of-band, then purge once more.
// NOTE(review): mid-method fragment — surrounding setup/assertions are not
// visible here, so the intent of the second pass is inferred; confirm.
blobStore.deletedChunkIds.removeAll(externallyDeletedChunks);
adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
@Test
public void deleteBlobsDespiteFileExplicitlyPurgedBeforeRestart() throws Exception {
    // Commit three deletions in sequence; between commits (but not after the
    // last one) advance the clock a minute and recreate the collector to
    // simulate restarts.
    String[][] blobsAndPaths = {
            {"blobId1", "/a"},
            {"blobId2", "/b"},
            {"blobId3", "/c"},
    };
    for (int i = 0; i < blobsAndPaths.length; i++) {
        BlobDeletionCallback bdc = adbc.getBlobDeletionCallback();
        bdc.deleted(blobsAndPaths[i][0], Collections.singleton(blobsAndPaths[i][1]));
        bdc.commitProgress(COMMIT_SUCCEDED);
        if (i < blobsAndPaths.length - 1) {
            clock.waitUntil(clock.getTime() + TimeUnit.MINUTES.toMillis(1));
            createBlobCollector();
        }
    }
    // All three deletions must survive the restarts and be purged together.
    adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore);
    verifyBlobsDeleted("blobId1", "blobId2", "blobId3");
}
// Commits `size` changes (starting at `offset`) and verifies the journal
// garbage collector cleans up exactly that many entries.
private void doLargeCleanupTest(int offset, int size) throws Exception {
    // NOTE(review): this virtual clock is created but never wired into the
    // MK/node store below — it appears vestigial. TODO confirm.
    Clock clock = new Clock.Virtual();
    DocumentMK mk1 = createMK(0 /* clusterId: 0 => uses clusterNodes collection */, 0,
            new MemoryDocumentStore(), new MemoryBlobStore());
    DocumentNodeStore ns1 = mk1.getNodeStore();
    // make sure we're visible and marked as active
    renewClusterIdLease(ns1);
    JournalGarbageCollector gc = new JournalGarbageCollector(ns1, 0);
    // NOTE(review): both return values are discarded — presumably these
    // calls only advance the (unused) virtual clock; verify whether they
    // can be removed.
    clock.getTimeIncreasing();
    clock.getTimeIncreasing();
    gc.gc(); // cleanup everything that might still be there
    // create entries as parametrized:
    for (int i = offset; i < size + offset; i++) {
        mk1.commit("/", "+\"regular" + i + "\": {}", null, null);
        // always run background ops to 'flush' the change
        // into the journal:
        ns1.runBackgroundOperations();
    }
    Thread.sleep(100); // sleep 100millis
    assertEquals(size, gc.gc()); // should now be able to clean up everything
}
@Test
public void updateHeadWhenIdle() throws Exception {
    // Drive Revision with a virtual clock so idle time can be simulated.
    Clock clock = new Clock.Virtual();
    clock.waitUntil(System.currentTimeMillis());
    Revision.setClock(clock);
    DocumentNodeStore ns = builderProvider.newBuilder()
            .clock(clock).setAsyncDelay(0).getNodeStore();
    doSomeChange(ns);
    ns.runBackgroundOperations();
    Revision head1 = ns.getHeadRevision().getRevision(ns.getClusterId());
    assertNotNull(head1);
    // Advance the virtual clock by 30 seconds of idle time.
    clock.waitUntil(clock.getTimeIncreasing() + TimeUnit.SECONDS.toMillis(30));
    // background operations must not update head yet
    ns.runBackgroundOperations();
    Revision head2 = ns.getHeadRevision().getRevision(ns.getClusterId());
    assertNotNull(head2);
    assertEquals(head1, head2);
    // Another 30 seconds of idle time — presumably this crosses the idle
    // threshold after which head is refreshed; TODO confirm the threshold.
    clock.waitUntil(clock.getTimeIncreasing() + TimeUnit.SECONDS.toMillis(30));
    // next run of background operations must update head
    ns.runBackgroundOperations();
    Revision head3 = ns.getHeadRevision().getRevision(ns.getClusterId());
    assertNotNull(head3);
    assertTrue(head1.compareRevisionTime(head3) < 0);
}
long firstCommitNumChunks = blobStore.numChunks; adbc.purgeBlobsDeleted(0, blobStore);//hack to purge file long time = clock.getTimeIncreasing(); long hackPurgeNumChunks = blobStore.numChunks; assertEquals("Hack purge must not purge any blob (first commit)",