// Body of the "life-limiter" watchdog thread: sleeps for the configured process
// lifetime, then force-exits the JVM with a non-zero status code.
@Override public void run() { CommonUtils.sleepMs(lifetimeMs); LOG.info("Process has timed out after {}ms, exiting now", lifetimeMs); System.exit(-1); } }, "life-limiter").start();
// Progress-indicator thread body: prints a "." to the stream every intervalMs
// until interrupted. NOTE: Thread.interrupted() also clears the interrupt flag.
@Override public void run() { while (true) { CommonUtils.sleepMs(intervalMs); if (Thread.interrupted()) { return; } stream.print("."); } } });
/**
 * Kills the Alluxio Master by a 'kill -9' command.
 *
 * Scans the process table for a Java process running alluxio.master.AlluxioMaster,
 * force-kills every matching pid, then sleeps briefly to let the process die.
 */
private static void killMaster() {
  String[] killMasterCommand = new String[]{"/usr/bin/env", "bash", "-c",
      "for pid in `ps -Aww -o pid,command | grep -i \"[j]ava\" | grep "
          + "\"alluxio.master.AlluxioMaster\" | awk '{print $1}'`; do kill -9 \"$pid\"; done"};
  try {
    Runtime.getRuntime().exec(killMasterCommand).waitFor();
    CommonUtils.sleepMs(LOG, 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status instead of silently swallowing it, so callers
    // can still observe the interruption.
    Thread.currentThread().interrupt();
    LOG.error("Interrupted when killing Master", e);
  } catch (Exception e) {
    LOG.error("Error when killing Master", e);
  }
}
/**
 * Polls stdin until data is available or {@link #TIMEOUT_MS} elapses.
 *
 * @return true if stdin has data before {@link #TIMEOUT_MS} elapses
 * @throws IOException if checking stdin availability fails
 */
private static boolean stdinHasData() throws IOException {
  final long startMs = System.currentTimeMillis();
  while (System.in.available() == 0) {
    boolean timedOut = System.currentTimeMillis() - startMs > TIMEOUT_MS;
    if (timedOut) {
      System.out.println(
          "Timed out waiting for input from stdin. Use -noTimeout to wait longer.");
      return false;
    }
    // Poll at a coarse granularity to avoid busy-waiting.
    CommonUtils.sleepMs(50);
  }
  return true;
}
private static void stopAlluxioFramework() throws Exception { String stopScript = PathUtils.concatPath(sConf.get(PropertyKey.HOME), "integration", "mesos", "bin", "alluxio-mesos-stop.sh"); ProcessBuilder pb = new ProcessBuilder(stopScript); pb.start().waitFor(); // Wait for Mesos to unregister and shut down the Alluxio Framework. CommonUtils.sleepMs(5000); }
/**
 * Starts the Alluxio Master by executing the alluxio-start.sh launch script,
 * then sleeps briefly to give the process time to come up.
 */
private static void startMaster() {
  String alluxioStartPath =
      PathUtils.concatPath(sConf.get(PropertyKey.HOME), "bin", "alluxio-start.sh");
  String startMasterCommand = String.format("%s master", alluxioStartPath);
  try {
    Runtime.getRuntime().exec(startMasterCommand).waitFor();
    CommonUtils.sleepMs(LOG, 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status instead of silently swallowing it.
    Thread.currentThread().interrupt();
    LOG.error("Interrupted when starting Master", e);
  } catch (Exception e) {
    LOG.error("Error when starting Master", e);
  }
}
/**
 * Stops the current Alluxio cluster. This is used for preparation and clean up.
 * To crash the Master, use {@link #killMaster()}.
 */
private static void stopCluster() {
  String alluxioStopPath =
      PathUtils.concatPath(sConf.get(PropertyKey.HOME), "bin", "alluxio-stop.sh");
  String stopClusterCommand = String.format("%s all", alluxioStopPath);
  try {
    Runtime.getRuntime().exec(stopClusterCommand).waitFor();
    CommonUtils.sleepMs(LOG, 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status instead of silently swallowing it.
    Thread.currentThread().interrupt();
    LOG.error("Interrupted when stopping Alluxio cluster", e);
  } catch (Exception e) {
    // Fixed message grammar ("when stop" -> "when stopping").
    LOG.error("Error when stopping Alluxio cluster", e);
  }
}
}
@Override public boolean attempt() { if (mAttemptCount == 0) { // first attempt, set the start time mStartMs = CommonUtils.getCurrentMs(); mAttemptCount++; return true; } if (mSleepMs > 0) { CommonUtils.sleepMs(mSleepMs); } if ((CommonUtils.getCurrentMs() - mStartMs) <= mRetryTimeoutMs) { mAttemptCount++; return true; } return false; } }
/** * Main loop for the cleanup, continuously looks for zombie sessions. */ @Override public void run() { long lastCheckMs = System.currentTimeMillis(); while (mRunning) { // Check the time since last check, and wait until it is within check interval long lastIntervalMs = System.currentTimeMillis() - lastCheckMs; long toSleepMs = mCheckIntervalMs - lastIntervalMs; if (toSleepMs > 0) { CommonUtils.sleepMs(LOG, toSleepMs); } else { LOG.warn("Session cleanup took: {}, expected: {}", lastIntervalMs, mCheckIntervalMs); } // Check if any sessions have become zombies, if so clean them up lastCheckMs = System.currentTimeMillis(); for (long session : mSessions.getTimedOutSessions()) { mSessions.removeSession(session); for (SessionCleanable sc : mSessionCleanables) { sc.cleanupSession(session); } } } }
/**
 * Sleeps for the duration configured in the job config. Used to simulate a
 * long-running task.
 *
 * @param config the sleep job configuration carrying the sleep duration
 * @param args unused
 * @param jobWorkerContext unused
 * @return always null
 */
@Override
public SerializableVoid runTask(SleepJobConfig config, SerializableVoid args,
    JobWorkerContext jobWorkerContext) throws Exception {
  long sleepDurationMs = config.getTimeMs();
  CommonUtils.sleepMs(sleepDurationMs);
  return null;
}
}
/**
 * Verifies that once the sampling cooldown has elapsed, every warning is
 * forwarded to the underlying logger again.
 */
@Test
public void sampleAfterCooldown() {
  setupLogger(1);
  doReturn(true).when(mBaseLogger).isWarnEnabled();
  final int iterations = 10;
  for (int i = 0; i < iterations; i++) {
    mSamplingLogger.warn("warning1");
    mSamplingLogger.warn("warning2");
    // Sleep past the 1ms cooldown so the next iteration is sampled again.
    CommonUtils.sleepMs(2);
  }
  verify(mBaseLogger, times(iterations)).warn("warning1");
  verify(mBaseLogger, times(iterations)).warn("warning2");
}
/**
 * Tests the exponential back-off logic: after the i-th tick becomes ready, the
 * total elapsed time should be at least the sum of all prior waits,
 * 1 + 2 + ... + 2^(i-1) = 2^i - 1 milliseconds.
 */
@Test(timeout = 2000)
public void backoff() {
  int n = 10;
  ExponentialTimer timer = new ExponentialTimer(1, 1000, 0, 1000);
  long start = System.currentTimeMillis();
  for (int i = 0; i < n; i++) {
    while (timer.tick() == ExponentialTimer.Result.NOT_READY) {
      CommonUtils.sleepMs(10);
    }
    long now = System.currentTimeMillis();
    // Precedence fix: the original "1 << i - 1" parsed as 1 << (i - 1), which for
    // i == 0 shifts by 31 and yields a negative, vacuous bound. The intended
    // lower bound is (2^i) - 1.
    Assert.assertTrue(now - start >= ((1 << i) - 1));
  }
}
}
private void edgeLockTest(LockMode take, LockMode tryToTake, boolean expectBlocking) throws Exception { InodeLockManager lockManager = new InodeLockManager(); AtomicBoolean threadFinished = new AtomicBoolean(false); LockResource lock = lockManager.lockEdge(new Edge(10, "name"), take); Thread t = new Thread(() -> { // Use a new Edge each time to make sure we aren't comparing edges by reference. try (LockResource lr = lockManager.lockEdge(new Edge(10, "name"), tryToTake)) { threadFinished.set(true); } }); t.start(); if (expectBlocking) { CommonUtils.sleepMs(20); assertFalse(threadFinished.get()); lock.close(); } CommonUtils.waitFor("lock to be acquired by the second thread", () -> threadFinished.get()); } }
private void inodeLockTest(LockMode take, LockMode tryToTake, boolean expectBlocking) throws Exception { InodeLockManager lockManager = new InodeLockManager(); AtomicBoolean threadFinished = new AtomicBoolean(false); MutableInodeFile inode = MutableInodeFile.create(0, 0, "name", 0, CreateFileContext.defaults()); LockResource lock = lockManager.lockInode(inode, take); Thread t = new Thread(() -> { // Copy the inode to make sure we aren't comparing inodes by reference. MutableInodeFile inodeCopy = MutableInodeFile.fromJournalEntry(inode.toJournalEntry().getInodeFile()); try (LockResource lr = lockManager.lockInode(inodeCopy, tryToTake)) { threadFinished.set(true); } }); t.start(); if (expectBlocking) { CommonUtils.sleepMs(20); assertFalse(threadFinished.get()); lock.close(); } CommonUtils.waitFor("lock to be acquired by the second thread", () -> threadFinished.get()); }
/**
 * Tests the {@link CommonUtils#getCurrentMs()} and {@link CommonUtils#sleepMs(long)} methods.
 */
@Test
public void getCurrentMsAndSleepMs() {
  final long delta = 100;
  long before = CommonUtils.getCurrentMs();
  CommonUtils.sleepMs(delta);
  long after = CommonUtils.getCurrentMs();
  // The wake-up time must fall within [before + delta, before + 2 * delta].
  assertTrue(before + delta <= after);
  assertTrue(after <= before + 2 * delta);
}
/**
 * Tests that {@link JobInfo#compareTo} orders jobs by the time of their most
 * recent creation/status change: the later-updated job compares greater.
 */
@Test
public void compare() {
  JobConfig jobConfig = new TestJobConfig("unused");
  JobInfo first = new JobInfo(0L, jobConfig, null);
  CommonUtils.sleepMs(1);
  JobInfo second = new JobInfo(0L, jobConfig, null);
  // "second" was created later, so "first" compares less.
  Assert.assertEquals(-1, first.compareTo(second));
  second.setStatus(Status.RUNNING);
  CommonUtils.sleepMs(1);
  first.setStatus(Status.RUNNING);
  // "first" changed status later, so it compares greater.
  Assert.assertEquals(1, first.compareTo(second));
  first.setStatus(Status.COMPLETED);
  CommonUtils.sleepMs(1);
  second.setStatus(Status.COMPLETED);
  // "second" changed status later, so "first" compares less again.
  Assert.assertEquals(-1, first.compareTo(second));
}
/**
 * Tests that the maximum total wait time is respected: once it has elapsed,
 * the timer reports EXPIRED instead of READY.
 */
@Test
public void expiration() {
  final int maxTotalWaitTimeMs = 1000;
  ExponentialTimer timer = new ExponentialTimer(0, 0, 0, maxTotalWaitTimeMs);
  Assert.assertEquals(ExponentialTimer.Result.READY, timer.tick());
  // Sleep past the total wait budget so the next tick observes expiry.
  CommonUtils.sleepMs(maxTotalWaitTimeMs);
  Assert.assertEquals(ExponentialTimer.Result.EXPIRED, timer.tick());
}
/** * Tests that the provided timeout is respected. */ @Test public void timeout() { final long timeoutMs = 500; final long slackMs = 200; TimeoutRefresh timeoutRefresh = new TimeoutRefresh(timeoutMs); // First check, should attempt Assert.assertTrue(timeoutRefresh.attempt()); // Second check, should not attempt before refresh timeout Assert.assertFalse(timeoutRefresh.attempt()); CommonUtils.sleepMs(timeoutMs); CommonUtils.sleepMs(slackMs); Assert.assertTrue(timeoutRefresh.attempt()); Assert.assertFalse(timeoutRefresh.attempt()); } }
@Test public void listLargeDirectory() throws IOException { LargeDirectoryConfig config = prepareLargeDirectoryTest(); String[] children = config.getChildren(); // Retry for some time to allow list operation eventual consistency for S3 and GCS. // See http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html and // https://cloud.google.com/storage/docs/consistency for more details. // Note: not using CommonUtils.waitFor here because we intend to sleep with a longer interval. UfsStatus[] results = new UfsStatus[] {}; for (int i = 0; i < 20; i++) { results = mUfs.listStatus(config.getTopLevelDirectory()); if (children.length == results.length) { break; } CommonUtils.sleepMs(500); } assertEquals(children.length, results.length); String[] resultNames = UfsStatus.convertToNames(results); Arrays.sort(resultNames); for (int i = 0; i < children.length; ++i) { assertTrue(resultNames[i].equals(CommonUtils.stripPrefixIfPresent(children[i], PathUtils.normalizePath(config.getTopLevelDirectory(), "/")))); } }
@Test public void deleteLargeDirectory() throws IOException { LargeDirectoryConfig config = prepareLargeDirectoryTest(); mUfs.deleteDirectory(config.getTopLevelDirectory(), DeleteOptions.defaults().setRecursive(true)); String[] children = config.getChildren(); for (String child : children) { // Retry for some time to allow list operation eventual consistency for S3 and GCS. // See http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html and // https://cloud.google.com/storage/docs/consistency for more details. // Note: not using CommonUtils.waitFor here because we intend to sleep with a longer interval. boolean childDeleted = false; for (int i = 0; i < 20; i++) { childDeleted = !mUfs.isFile(child) && !mUfs.isDirectory(child); if (childDeleted) { break; } CommonUtils.sleepMs(500); } assertTrue(childDeleted); } }