/**
 * Records a duration in the timer identified by {@code fullTimerName}.
 *
 * @param fullTimerName the full name of the timer
 * @param duration      the duration value to record
 * @param timeUnit      the unit of {@code duration}
 */
private void addValueToTimer(String fullTimerName, final long duration, final TimeUnit timeUnit) {
    final MetricName metricName = new MetricName(_clazz, fullTimerName);
    // Fix: the timer was previously created/looked up twice and the first result
    // was discarded; reuse the single registered instance instead.
    com.yammer.metrics.core.Timer timer =
            MetricsHelper.newTimer(_metricsRegistry, metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    timer.update(duration, timeUnit);
}
/**
 * Adds a recorded duration.
 *
 * @param duration the length of the duration
 * @param unit the scale unit of {@code duration}
 */
public void update(long duration, TimeUnit unit) {
    // Normalize to nanoseconds before delegating to the single-argument overload.
    final long nanos = unit.toNanos(duration);
    update(nanos);
}
/**
 * Records a duration on the wrapped timer {@code t} and stamps the wall-clock
 * time of this update in {@code lastUpdateMillis}.
 *
 * @param duration the length of the duration
 * @param timeUnit the unit of {@code duration}
 */
public void update(long duration, TimeUnit timeUnit) {
    // Track when this timer was last touched (wall-clock millis).
    lastUpdateMillis = System.currentTimeMillis();
    t.update(duration, timeUnit);
}
/**
 * Records a duration against the per-status-code metric, falling back to the
 * default status code when none was supplied.
 *
 * @param statusCode the HTTP status code, or {@code null} for the default
 * @param duration   the length of the duration
 * @param unit       the unit of {@code duration}
 */
public void update(Integer statusCode, long duration, TimeUnit unit) {
    // Resolve which per-status metric receives this sample.
    final int key = (statusCode == null) ? defaultStatusCode : statusCode.intValue();
    metrics.get(key).update(duration, unit);
}
}
/**
 * Stops recording the elapsed time and updates the timer.
 */
public void stop() {
    // Elapsed ticks since start(); clock.tick() is in nanoseconds per the update call.
    final long elapsed = clock.tick() - startTime;
    timer.update(elapsed, TimeUnit.NANOSECONDS);
}
}
/**
 * Records end-to-end indexing latency for an event, measured from the event's
 * creation timestamp (read from the configured field) to now.
 *
 * @param obj the event payload carrying the creation-timestamp field
 */
private void reportIndexingLatency(JSONObject obj) {
    // No timestamp field configured — latency reporting is disabled.
    if (_eventCreatedTimestampField == null) {
        return;
    }
    long createdTimestamp = obj.optLong(_eventCreatedTimestampField);
    // optLong returns 0 when the field is missing or non-numeric; skip those.
    if (createdTimestamp <= 0) {
        return;
    }
    IndexingLatencyTimer.update(System.currentTimeMillis() - createdTimestamp, TimeUnit.MILLISECONDS);
}
/**
 * Blocks until a query request is available, recording how long the request
 * waited in the queue and decrementing the queued-count gauge.
 *
 * @return the next queued query request
 * @throws InterruptedException if interrupted while waiting
 */
public QueryRequest takeQuery() throws InterruptedException {
    final QueryRequest pending = blockingQueue.take();
    // Time spent queued, from enqueue until now.
    final long waitedMillis = System.currentTimeMillis() - pending.queueStartTime;
    queueTimer.update(waitedMillis, TimeUnit.MILLISECONDS);
    queueCount.dec();
    return pending;
}
}
/**
 * Records the elapsed time of a statement execution on its associated timer.
 *
 * @param elapsedTime elapsed time in nanoseconds
 * @param ctx         the statement context used to resolve the timer
 */
@Override
public void collect(long elapsedTime, StatementContext ctx) {
    // elapsedTime is already in nanoseconds; record it directly.
    getTimer(ctx).update(elapsedTime, TimeUnit.NANOSECONDS);
}
/**
 * Times and records the duration of event.
 *
 * @param event a {@link Callable} whose {@link Callable#call()} method implements
 *              a process whose duration should be timed
 * @param <T>   the type of the value returned by {@code event}
 * @return the value returned by {@code event}
 * @throws Exception if {@code event} throws an {@link Exception}
 */
public <T> T time(Callable<T> event) throws Exception {
    final long begin = clock.tick();
    try {
        return event.call();
    } finally {
        // Record the duration even when the callable throws.
        update(clock.tick() - begin);
    }
}
/** * Part 1 - SETUP * Initialize query run -- parse options, create Query object */ protected void setup() throws Exception { long startTime = System.currentTimeMillis(); MeshQuerySource.queueTimes.update(creationTime - startTime, TimeUnit.MILLISECONDS); query = CodecJSON.decodeString(Query.class, options.get("query")); // set as soon as possible (and especially before creating op processor) query.queryPromise = bridge.queryPromise; // Parse the query and return a reference to the last QueryOpProcessor. ChannelProgressivePromise opPromise = new DefaultChannelProgressivePromise(null, ImmediateEventExecutor.INSTANCE); queryOpProcessor = query.newProcessor(bridge, opPromise); }
/**
 * Polls for the next bundle, retrying until data arrives, the source closes,
 * or the retry countdown is exhausted.
 *
 * @return the next bundle, or {@code null} if the source is closed or the
 *         poll countdown ran out without data
 * @throws DataChannelError if too many sources were skipped
 */
@Nullable @Override public Bundle next() throws DataChannelError {
    // Bail out when the configured skip limit has been exceeded.
    if ((skipSourceExit > 0) && (consecutiveFileSkip.get() >= skipSourceExit)) {
        throw new DataChannelError("skipped too many sources: " + skipSourceExit +
                                   ". please check your job config.");
    }
    int countdown = pollCountdown;
    // NOTE: order matters here — waitForInitialized() is only called when
    // localInitialized is false, and (countdown-- > 0) decrements on every
    // evaluation once the first clause is false. pollCountdown == 0 means
    // "poll forever"; otherwise loop at most pollCountdown times.
    while (((localInitialized || waitForInitialized()) && (pollCountdown == 0)) || (countdown-- > 0)) {
        // Only pay for the clock read when JMX metrics are enabled.
        long startTime = jmxMetrics ? System.currentTimeMillis() : 0;
        Bundle next = pollAndCloseOnInterrupt(pollInterval, TimeUnit.MILLISECONDS);
        if (jmxMetrics) {
            readTimer.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
        }
        if (next != null) {
            return next;
        }
        // Source finished: propagate any close failure via join(), then end.
        if (closeFuture.isDone()) {
            closeFuture.join();
            return null;
        }
        if (pollCountdown > 0) {
            log.info("next polled null, retrying {} more times. shuttingDown={}",
                     countdown, shuttingDown.get());
        }
        log.info(fileStatsToString("null poll "));
    }
    if (countdown < 0) {
        log.info("exit with no data during poll countdown");
    }
    return null;
}
/**
 * Loads the file references for a given job.
 *
 * @param job - the UID of the job to get the FileReferences for
 * @return - a map of the 'best' file references for each task in the given job
 */
@Nonnull
private SetMultimap<Integer, FileReference> loadFileReferencesForJob(String job) throws InterruptedException {
    final long fetchStart = System.currentTimeMillis();
    MeshFileRefCache.fileReferenceFetches.inc();
    // Without mesh peers there is nothing to query; return an empty map.
    if (meshy.getChannelCount() == 0) {
        MeshFileRefCache.log.warn("[MeshQueryMaster] Error: there are no available mesh peers.");
        return ImmutableSetMultimap.of();
    }
    SetMultimap<Integer, FileReference> references = getFileReferences(job, "*");
    MeshFileRefCache.log.trace("file reference details before filtering:\n {}", references);
    // Keep only the 'best' reference per task.
    references = MeshFileRefCache.filterFileReferences(references);
    MeshFileRefCache.log.trace("file reference details after filtering:\n{}", references);
    final long elapsedMillis = System.currentTimeMillis() - fetchStart;
    MeshFileRefCache.log.debug("File reference retrieval time: {}", elapsedMillis);
    MeshFileRefCache.fileReferenceFetchTimes.update(elapsedMillis, TimeUnit.MILLISECONDS);
    return references;
}
@Override public Response onCompleted(final Response response) { activeRequests.decrementAndGet(); if (response.getStatusCode() == 202) { handler.onSuccess(file); } else { handler.onError(new IOException(String.format("Received response %d: %s", response.getStatusCode(), response.getStatusText())), file); } sendTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); return response; // never read }
/**
 * Part 3 - SEARCH
 * Run the search -- most of this logic is in QueryEngine.search(). We only take
 * care of logging times and passing the sendComplete message along.
 */
protected void search() {
    final long begin = System.currentTimeMillis();
    finalEng.search(query, queryOpProcessor, bridge.getQueryPromise());
    queryOpProcessor.sendComplete();
    final long elapsed = System.currentTimeMillis() - begin;
    if (log.isDebugEnabled() || query.isTraced()) {
        Query.traceLog.info("[QueryReference] search complete {} in {}ms directory: {} slow={} rowsIn: {}",
                query.uuid(), elapsed, goldDirString,
                elapsed > MeshQuerySource.slowQueryThreshold,
                queryOpProcessor.getInputRows());
    }
    MeshQuerySource.queryTimes.update(elapsed, TimeUnit.MILLISECONDS);
}
}
/** * Part 2 - ENGINE CACHE * Get a QueryEngine for our query -- check the cache for a suitable candidate, otherwise make one. * Most of this logic is handled by the QueryEngineCache.get() function. */ protected QueryEngine getEngine() throws Exception { final long engineGetStartTime = System.currentTimeMillis(); // Use the canonical path stored in the canonicalDirString to create a QueryEngine. By that way // if the alias changes new queries will use the latest available // database and the old engines will be automatically closed after their TTL expires. QueryEngine engine = MeshQuerySource.queryEngineCache.getAndLease(goldDirString); final long engineGetDuration = System.currentTimeMillis() - engineGetStartTime; MeshQuerySource.engineGetTimer.update(engineGetDuration, TimeUnit.MILLISECONDS); if (engine == null) //Cache returned null -- this doesn't mean cache miss. It means something went fairly wrong { log.warn("[QueryReference] Unable to retrieve queryEngine for query: {}, key: {} after waiting: {}ms", query.uuid(), goldDirString, engineGetDuration); throw new DataChannelError("Unable to retrieve queryEngine for query: " + query.uuid() + ", key: " + goldDirString + " after waiting: " + engineGetDuration + "ms"); } //else we got an engine so we're good -- maybe this logic should be in the cache get if ((engineGetDuration > MeshQuerySource.slowQueryThreshold) || log.isDebugEnabled() || query.isTraced()) { Query.traceLog.info( "[QueryReference] Retrieved queryEngine for query: {}, key:{} after waiting: {}ms. slow={}", query.uuid(), goldDirString, engineGetDuration, engineGetDuration > MeshQuerySource.slowQueryThreshold); } return engine; }
// Record each sampled quantity into the timer; incomplete fragment — the loop's
// closing brace is outside this view.
for (final Quantity sample : entry.getValue()) {
    // Default to milliseconds when the sample carries no explicit unit.
    final TimeUnit timeUnit = sample.getUnit() == null ? TimeUnit.MILLISECONDS : toTimeUnit(sample.getUnit());
    timer.update(
            sample.getValue().longValue(),
            timeUnit);
/** takes nanoseconds **/ public void addNano(long nanos) { // convert to microseconds. 1 millionth latency.update(nanos, TimeUnit.NANOSECONDS); totalLatency.inc(nanos / 1000); totalLatencyHistogram.add(nanos / 1000); recentLatencyHistogram.add(nanos / 1000); for(LatencyMetrics parent : parents) { parent.addNano(nanos); } }
/**
 * Folds one RPC's statistics into the call timer and the request/response
 * size histograms.
 *
 * @param stats the statistics for a single completed call
 */
public void updateRpc(CallStats stats) {
    callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
    reqHist.update(stats.getRequestSizeBytes());
    respHist.update(stats.getResponseSizeBytes());
}
// Record the coordinator-side read latency on the target table's metrics.
// NOTE(review): assumes `latency` is already in nanoseconds, matching the
// TimeUnit.NANOSECONDS argument — confirm against the caller.
Keyspace.open(command.ksName).getColumnFamilyStore(command.cfName).metric.coordinatorReadLatency.update(latency, TimeUnit.NANOSECONDS);
} else if (metric instanceof Timer) {
    // Timers take whole nanoseconds; `value` appears to be in seconds, hence the 1e9
    // scale factor — TODO confirm the unit at the call site.
    ((Timer) metric).update((long) (value * 1e9), TimeUnit.NANOSECONDS);
} else {
    logger.debug("didn't find a class to map to");