/**
 * Callback fired after the test run completes: registers {@code listener}
 * (field from the enclosing scope) and then pauses to give in-flight
 * asynchronous events time to arrive.
 */
@Override public void afterCompletion() {
    // NOTE(review): adding the listener only *after* completion looks odd --
    // presumably the listeners collection is inspected during verification;
    // confirm the intent against the enclosing test.
    listeners.add(listener);
    // Grace period so the last asynchronous events can still be delivered.
    sleepSeconds(PAUSE_FOR_LAST_EVENTS_SECONDS);
}
}
/**
 * Sleeps {@code sleepSeconds} during the given phase. When {@code randomWorker}
 * is set, roughly half of the invocations skip the sleep entirely; otherwise the
 * sleep happens when all phases are targeted or the current phase matches the
 * configured one.
 */
private void sleep(TestPhase currentTestPhase) {
    // Randomly-selected workers skip the sleep about 50% of the time.
    if (randomWorker && new Random().nextBoolean()) {
        return;
    }
    boolean phaseTargeted = allPhases || currentTestPhase.equals(testPhase);
    if (phaseTargeted) {
        sleepSeconds(sleepSeconds);
    }
}
}
/**
 * Recurses {@code recursionDepth} times (deliberately deepening the call stack)
 * and then sleeps for {@code sleepSeconds}.
 *
 * @param recursionDepth number of recursive frames to build before sleeping;
 *                       values {@code <= 0} sleep immediately
 * @param sleepSeconds   seconds to sleep at the bottom of the recursion
 */
private void sleepRecursion(int recursionDepth, int sleepSeconds) {
    // Guard with <= 0: the original == 0 base case never terminated for a
    // negative depth, recursing until StackOverflowError.
    if (recursionDepth <= 0) {
        sleepSeconds(sleepSeconds);
        return;
    }
    sleepRecursion(recursionDepth - 1, sleepSeconds);
}
}
/**
 * Time-step that only sleeps for one second -- this step generates no load;
 * presumably it exists just to keep the worker thread occupied (confirm
 * against the enclosing test's purpose).
 */
@TimeStep public void timeStep() {
    sleepSeconds(1);
}
/**
 * Starts the Harakiri (self-termination) monitor: on AWS-EC2 it waits
 * {@code waitSeconds} and then executes the configured termination command;
 * on any other cloud provider it is a no-op.
 *
 * @throws CommandLineExitException if executing the termination command fails
 */
void start() {
    if (!isEC2(cloudProvider)) {
        LOGGER.info("No Harakiri monitor is active: only on AWS-EC2 unused machines will be terminated.");
        return;
    }
    LOGGER.info(format("Harakiri monitor is active and will wait %d seconds to kill this instance", waitSeconds));
    sleepSeconds(waitSeconds);
    LOGGER.info("Trying to commit Harakiri once!");
    try {
        LOGGER.info("Harakiri command: " + command);
        execute(command);
    } catch (Exception e) {
        // Wrap with context; the original cause is preserved.
        throw new CommandLineExitException("Failed to execute Harakiri", e);
    }
}
/**
 * Polls every 2 seconds until the listener's event counts match, or until the
 * count difference has failed to shrink for {@code maxIterationNoChange}
 * consecutive polls.
 *
 * NOTE(review): absDifference()/sameEventCount() are defined elsewhere in this
 * file; presumably absDifference is the |expected - observed| event count, so a
 * value that does not decrease (diff >= prev) means no progress -- confirm.
 */
public void waitWhileListenerEventsIncrease(EntryListenerImpl listener, int maxIterationNoChange) {
    int iterationsWithoutChange = 0;
    long prev = 0;
    do {
        long diff = absDifference(listener);
        if (diff >= prev) {
            // Difference did not shrink this round: count it as "no progress".
            iterationsWithoutChange++;
        } else {
            // Progress was made; restart the no-change counter.
            iterationsWithoutChange = 0;
        }
        prev = diff;
        sleepSeconds(2);
    } while (!sameEventCount(listener) && iterationsWithoutChange < maxIterationNoChange);
}
/**
 * Blocks the calling thread until the cluster has at least {@code clusterSize}
 * members, polling once per second and logging while it waits.
 */
public static void waitClusterSize(ILogger logger, HazelcastInstance hz, int clusterSize) {
    while (hz.getCluster().getMembers().size() < clusterSize) {
        logger.info("waiting cluster == " + clusterSize);
        sleepSeconds(1);
    }
}
/**
 * If performance monitoring is enabled, waits one monitor interval so the final
 * performance samples can arrive, then logs the detailed performance report for
 * the test run that started at {@code startMs}.
 */
private void logPerformanceInfo(long startMs) {
    // Capture the duration up front so the wait below does not inflate it.
    long actualDurationMs = currentTimeMillis() - startMs;
    if (performanceMonitorIntervalSeconds <= 0) {
        return;
    }
    LOGGER.info(testCase.getId() + " Waiting for all performance info");
    sleepSeconds(performanceMonitorIntervalSeconds);
    LOGGER.info("Performance " + testCase.getId() + "\n"
            + performanceStatsCollector.detailedPerformanceInfo(testCase.getId(), actualDurationMs));
}
/**
 * Blocks the calling thread until the cluster has at least {@code clusterSize}
 * members, polling once per second and logging while it waits.
 */
public static void waitClusterSize(ILogger logger, HazelcastInstance hz, int clusterSize) {
    while (hz.getCluster().getMembers().size() < clusterSize) {
        logger.info("waiting cluster == " + clusterSize);
        sleepSeconds(1);
    }
}
/**
 * Blocks the calling thread until the cluster has at least {@code clusterSize}
 * members, polling once per second and logging while it waits.
 */
public static void waitClusterSize(ILogger logger, HazelcastInstance hz, int clusterSize) {
    while (hz.getCluster().getMembers().size() < clusterSize) {
        logger.info("waiting cluster == " + clusterSize);
        sleepSeconds(1);
    }
}
/**
 * Blocks until every expected worker has reported completion of
 * {@code testPhase}, polling once per second. Both counters are re-read each
 * iteration -- presumably because the worker set can change while waiting;
 * confirm.
 *
 * @throws TestCaseAbortedException if a critical failure is detected while waiting
 */
private void waitForPhaseCompletion(TestPhase testPhase) {
    int completedWorkers = phaseCompletedMap.get(testPhase).size();
    int expectedWorkers = getExpectedWorkerCount(testPhase);
    long started = System.nanoTime();
    while (completedWorkers < expectedWorkers) {
        sleepSeconds(1);
        // Abort the wait as soon as any critical failure is reported.
        if (hasFailure()) {
            throw new TestCaseAbortedException(
                    format("Waiting for %s completion aborted (critical failure)", testPhase.desc()), testPhase);
        }
        completedWorkers = phaseCompletedMap.get(testPhase).size();
        expectedWorkers = getExpectedWorkerCount(testPhase);
        logMissingWorkers(testPhase, completedWorkers, expectedWorkers, started);
    }
}
/**
 * Handles a terminate-worker operation: member workers first honor the
 * configured shutdown delay, then the worker is shut down.
 */
private void processTerminateWorker(TerminateWorkerOperation operation) {
    LOGGER.warn("Terminating worker");
    boolean memberWorker = (type == WorkerType.MEMBER);
    if (memberWorker) {
        // Only member workers wait the configured delay before shutting down.
        sleepSeconds(operation.getMemberWorkerShutdownDelaySeconds());
    }
    worker.shutdown(operation.isEnsureProcessShutdown());
}
/**
 * Per-worker verification: waits twice the maximum TTL so entries have had a
 * chance to expire, then logs the remaining map size.
 */
@Verify(global = false) public void verify() {
    int expiryWaitSeconds = 2 * maxTTLExpirySeconds;
    sleepSeconds(expiryWaitSeconds);
    logger.info(name + ": map size =" + map.size());
}
}
/**
 * Per-worker verification: waits twice the maximum TTL so entries have had a
 * chance to expire, then logs the remaining map size.
 */
@Verify(global = false) public void verify() {
    int expiryWaitSeconds = 2 * maxTTLExpirySeconds;
    sleepSeconds(expiryWaitSeconds);
    logger.info(name + ": map size =" + map.size());
}
}
/**
 * Global verification: waits past the TTL, touches every key to provoke expiry
 * processing, then asserts that the cache eventually drains to empty as the
 * TTL-expiry events are processed.
 */
@Verify(global = true) public void globalVerify() {
    sleepSeconds(61); // provoke expire after TTL
    // Touch each key; presumably containsKey makes the cache notice the
    // expired entries -- confirm against the ICache expiry semantics.
    for (int i = 0; i < keyCount; i++) {
        cache.containsKey(i);
    }
    assertTrueEventually(new AssertTask() {
        @Override public void run() throws Exception {
            int cacheSize = cache.size();
            logger.info(name + " ICache size: " + cacheSize);
            assertEquals(name + " ICache should be empty, but TTL events are not processed", 0, cacheSize);
        }
    });
}
}
@Override public void run() throws Exception {
    // hack to prevent overloading the system with get calls, else it is done many times a second
    sleepSeconds(10);
    long actual = 0;
    // Sum the values of every IAtomicLong whose name starts with this test's
    // prefix; presumably these hold the per-worker counters -- confirm.
    for (DistributedObject distributedObject : targetInstance.getDistributedObjects()) {
        String key = distributedObject.getName();
        if (serviceName.equals(distributedObject.getServiceName()) && key.startsWith(name)) {
            actual += targetInstance.getAtomicLong(key).get();
        }
    }
    assertEquals(expected, actual);
}
}, assertEventuallySeconds); // closes the assertTrueEventually(...) call begun before this fragment
/**
 * Runs the given test suite via the coordinator. On a CommandLineExitException
 * it waits briefly for a worker failure to be reported (so the failure is
 * captured) before rethrowing.
 *
 * @return {@code true} if the run finished without a critical failure
 */
public boolean run(TestSuite testSuite) throws Exception {
    if (testSuite.getDurationSeconds() == 0) {
        LOGGER.info("Test suite runs without time-limit, it will complete when it decides it's ready or CTRL-C is pressed");
    }
    try {
        coordinator.createRunTestSuiteTask(testSuite).run();
    } catch (CommandLineExitException e) {
        // Give the failure collector a few seconds to pick up the worker failure
        // that presumably caused this exit, then rethrow the original exception.
        int attempts = 0;
        while (attempts < WAIT_FOR_WORKER_FAILURE_RETRY_COUNT && failureCollector.getFailureCount() == 0) {
            sleepSeconds(1);
            attempts++;
        }
        throw e;
    }
    return !failureCollector.hasCriticalFailure();
}
/**
 * Requests a stop of the test identified by {@code op} and waits up to
 * {@code testCompletionTimeoutSeconds} for it to complete.
 *
 * @return the test's status string once it has completed
 * @throws IllegalStateException if no test with the given id exists
 * @throws Exception if the test does not stop within the timeout
 */
public String testStop(RcTestStopOperation op) throws Exception {
    awaitInitialized();
    LOGGER.info(format("Test [%s] stopping...", op.getTestId()));
    TestData test = componentRegistry.getTestByAddress(SimulatorAddress.fromString(op.getTestId()));
    if (test == null) {
        throw new IllegalStateException(format("no test with id [%s] found", op.getTestId()));
    }
    // Request the stop exactly once: the original re-set the flag every second
    // (redundant) and never set it at all when the timeout was 0.
    test.setStopRequested(true);
    for (int i = 0; i < testCompletionTimeoutSeconds; i++) {
        // Check before sleeping so an already-completed test returns immediately.
        if (test.isCompleted()) {
            return test.getStatusString();
        }
        sleepSeconds(1);
    }
    // Final check covers completion during the last sleep interval.
    if (test.isCompleted()) {
        return test.getStatusString();
    }
    throw new Exception("Test failed to stop within " + testCompletionTimeoutSeconds
            + " seconds, current status: " + test.getStatusString());
}
/**
 * Fills the map in batches of 1000 puts (each with a 24h TTL) until heap usage
 * reaches {@code maxHeapUsagePercentage}; once the threshold is hit, it logs
 * and backs off for 10 seconds instead of writing.
 */
@TimeStep public void timeStep(ThreadState state) throws Exception {
    double usedPercentage = heapUsedPercentage();
    // Back off when the heap is too full; guard clause instead of if/else.
    if (usedPercentage >= maxHeapUsagePercentage) {
        logger.info(name + " heap used: " + usedPercentage + " %, map size: " + map.size());
        sleepSeconds(10);
        return;
    }
    for (int i = 0; i < 1000; i++) {
        state.counter++;
        // Periodic progress log every 100k operations.
        if (state.counter % 100000 == 0) {
            logger.info(name + " at: " + state.counter + ", heap used: " + usedPercentage + " %, map size: " + map.size());
        }
        long key = state.randomLong();
        map.put(key, 0L, 24, HOURS);
    }
}
/**
 * On member nodes, polls (up to {@code isClusterSafeRetries} one-second waits)
 * until the partition service reports the cluster safe, then logs safety and
 * partition statistics. Client nodes return immediately.
 */
@Prepare public void prepare() {
    if (!isMemberNode(targetInstance)) {
        return;
    }
    for (int retry = 0; !partitionService.isClusterSafe() && retry < isClusterSafeRetries; retry++) {
        logger.info(name + ": isClusterSafe() " + partitionService.isClusterSafe());
        sleepSeconds(1);
    }
    logger.info(name + ": isClusterSafe() " + partitionService.isClusterSafe());
    logger.info(name + ": isLocalMemberSafe() " + partitionService.isLocalMemberSafe());
    logger.info(name + ": getCluster().getMembers().size() " + targetInstance.getCluster().getMembers().size());
    logPartitionStatistics(logger, name, map, false);
}