/**
 * Restarts the underlying stop watch so estimation begins from zero.
 *
 * @see TimeEstimator#start()
 */
public void start() {
    final StopWatch watch = swGetter.get();
    watch.reset();
    watch.start();
}
/**
 * Zeroes the counter and restarts the stop watch so a fresh
 * measurement interval begins.
 */
public void start() {
    counter.set(0);
    stopWatch.reset();
    stopWatch.start();
}
/**
 * Records the name of the test about to run and restarts the
 * per-test stop watch.
 *
 * @param testName name of the test now executing
 */
public void setTest(String testName) {
    currentTest = testName;
    testStopWatch.reset();
    testStopWatch.start();
}
/**
 * Registers the given mutation as the current one, records its id,
 * and restarts the per-mutation stop watch.
 *
 * @param mutation mutation that is about to be exercised
 */
public void addMutation(Mutation mutation) {
    mutationStopWatch.reset();
    mutationStopWatch.start();
    currentMutation = mutation;
    mutations.add(mutation.getId());
}
/**
 * Stops the watch, logs the elapsed time together with the supplied
 * message, then resets and restarts the watch for the next lap.
 *
 * @param watch         watch measuring the lap that just finished
 * @param messageForLap text to log alongside the elapsed time
 */
private void lapWatchAndLog(StopWatch watch, String messageForLap) {
    watch.stop();
    final String logLine = String.format("Time: [%s] %s", watch.getTime(), messageForLap);
    LOGGER.info(logLine);
    watch.reset();
    watch.start();
}
// Restart the watch: clear accumulated time, then begin timing a new interval.
stopWatch.reset(); stopWatch.start();
@Override public long getLag() { // Periodically cache the end offset if(endOffset == null || endOffsetWatch.getTime() > END_OFFSET_REFRESH_MS_DEFAULT) { Map<TopicPartition, Long> offsets = consumer.endOffsets(Collections.singletonList(new TopicPartition(topicName, 0))); endOffset = offsets.get(new TopicPartition(topicName, 0)); endOffsetWatch.reset(); endOffsetWatch.start(); } // Because the end offset is only updated periodically, it's possible to see negative lag. Send 0 instead. long lag = endOffset - (getCurrentOffset() == null ? 0 : getCurrentOffset()); return lag < 0 ? 0 : lag; }
// Restart the watch: clear accumulated time, then begin timing a new interval.
stopWatch.reset(); stopWatch.start();
// Restart the watch: clear accumulated time, then begin timing a new interval.
stopWatch.reset(); stopWatch.start();
// Clear the watch, then wake one thread waiting on monitorObj.
// NOTE(review): notify() throws IllegalMonitorStateException unless the
// caller holds monitorObj's monitor — confirm this runs inside
// synchronized (monitorObj); the enclosing scope is not visible here.
stopWatch.reset(); monitorObj.notify();
public static void deleteResults(Session session, Query q) { @SuppressWarnings("unchecked") List<Mutation> mutations = q.list(); int deletes = 0, flushs = 0; StopWatch stp = new StopWatch(); for (Mutation m : mutations) { MutationTestResult result = m.getMutationResult(); if (result != null) { m.setMutationResult(null); session.delete(result); deletes++; } if (deletes > 20) { // 20, same as the JDBC batch size // flush a batch of inserts and release memory: // see // http://www.hibernate.org/hib_docs/reference/en/html/batch.html stp.reset(); stp.start(); flushs++; session.flush(); // session.clear(); logger.info("Did flush. It took: " + DurationFormatUtils.formatDurationHMS(stp.getTime())); deletes = 0; } } logger.info(String.format("Deleted %d mutation results", mutations.size())); }
/**
 * Turns the current mutation on.
 *
 * <p>No-op when no mutation is currently selected. Otherwise restarts the
 * stop watch and publishes the mutation via system properties.
 */
public void switchOn() {
    if (currentMutation == null) {
        return;
    }
    logger.info("enabling mutation: " + currentMutation.getMutationVariable()
            + " in line " + currentMutation.getLineNumber()
            + " - " + currentMutation.toString());
    stopWatch.reset();
    stopWatch.start();
    System.setProperty(currentMutation.getMutationVariable(), "1");
    System.setProperty(CURRENT_MUTATION_KEY, currentMutation.getId() + "");
}
private void returnAllIndicesCurrentStateAndReset() { LOG.debug("Master Actor message received for DONE check, status is:" + allIndexingDone); getSender().tell(allIndexingDone, getSelf()); // Reset current state if (allIndexingDone) { LOG.debug("Indexing setup finished for all indices!"); allIndexingDone = false; indexDone.clear(); // Setting it here, but need to check that it will never be called, // if client dies. // put additional check when you receive new rebuild call, 1 min // check isRebuildInProgress = false; stopWatch.reset(); // TODO as it is single instance, need not to stop it. // getContext().stop(getSelf()); // TODO: check when the alising should be changed. } }
private void handleIndexingRebuildMessage(final Object message) { // Start watch first time. if (!isRebuildInProgress) { stopWatch.start(); } // need to validate the hanging state here // All indexing done, but rebuild in progress. wait 5 min for client // otherwise reset state. if (allIndexingDone && isRebuildInProgress) { if (stopWatch.getTime() > 5 * 60 * 1000) { isRebuildInProgress = false; stopWatch.reset(); } } if (isRebuildInProgress) { LOG.error( "Rebuilding is already in progress, ignoring another rebuild message: {}", message); } else { isRebuildInProgress = true; setupIndicesForAll(); } }
// Restart the watch to time the upcoming flush, and count it.
stp.reset(); stp.start(); flushs++;
/**
 * Inserts {@code num} generated events into the window, then flushes it,
 * recording the insert time and the flush ("read") time into
 * {@code performanceReport}.
 *
 * @param window      window under test
 * @param storageType storage backend being measured (used in report keys)
 * @param num         number of events to insert
 */
public void sendDESCOrderedEventsToWindow(StreamWindow window, StreamWindowRepository.StorageType storageType, int num) {
    LOGGER.info("Sending {} events to {} ({})", num, window.getClass().getSimpleName(), storageType);
    StopWatch timer = new StopWatch();
    timer.start();
    for (int offset = 0; offset < num; offset++) {
        window.add(MockSampleMetadataFactory.createPartitionedEventGroupedByName("sampleStream_1", (window.startTime() + offset)));
    }
    timer.stop();
    performanceReport.put(num + "\tInsertTime\t" + storageType, timer.getTime());
    LOGGER.info("Inserted {} events in {} ms", num, timer.getTime());
    timer.reset();
    timer.start();
    window.flush();
    timer.stop();
    performanceReport.put(num + "\tReadTime\t" + storageType, timer.getTime());
}
// Log the batch's throughput, then restart the batch watch for the next batch.
LOG.info("walked [" + batchSize + "] interactions in " + getProgressMsg(batchSize, duration)); watchForBatch.reset(); watchForBatch.start();
// Restart the watch, then poll until at least one Gerrit project is visible.
// NOTE(review): the loop body is outside this fragment — presumably it
// sleeps/checks a timeout against `watch`; confirm it cannot spin forever.
watch.reset(); watch.start(); while (gerritServer.getGerritProjects().size() == 0) {
// Log the batch's throughput, then restart the batch watch for the next batch.
LOG.info("resolved batch of [" + batchSize + "] names in " + getProgressMsg(batchSize, duration)); watchForBatch.reset(); watchForBatch.start();
// Assert p1 is not yet sortable, then poll for up to 5 s.
// NOTE(review): the loop body is outside this fragment — presumably it
// re-checks the condition and breaks/sleeps; verify against the full test.
assertFalse(graph2.getPropertyDefinition("p1").isSortable()); timeout.reset(); timeout.start(); while (timeout.getTime() < 5000) {