private static List<Metric> allMetrics(MetricsSystem.InstanceType instanceType) { List<Metric> metrics = new ArrayList<>(); for (Entry<String, Gauge> entry : METRIC_REGISTRY.getGauges().entrySet()) { if (entry.getKey().startsWith(instanceType.toString())) { Object value = entry.getValue().getValue(); if (!(value instanceof Number)) { LOG.warn( "The value of metric {} of type {} is not sent to metrics master," + " only metrics value of number can be collected", entry.getKey(), entry.getValue().getClass().getSimpleName()); continue; } metrics.add(Metric.from(entry.getKey(), ((Number) value).longValue())); } } for (Entry<String, Counter> entry : METRIC_REGISTRY.getCounters().entrySet()) { metrics.add(Metric.from(entry.getKey(), entry.getValue().getCount())); } for (Entry<String, Meter> entry : METRIC_REGISTRY.getMeters().entrySet()) { // TODO(yupeng): From Meter's implementation, getOneMinuteRate can only report at rate of at // least seconds. if the client's duration is too short (i.e. < 1s), then getOneMinuteRate // would return 0 metrics.add(Metric.from(entry.getKey(), entry.getValue().getOneMinuteRate())); } for (Entry<String, Timer> entry : METRIC_REGISTRY.getTimers().entrySet()) { metrics.add(Metric.from(entry.getKey(), entry.getValue().getCount())); } return metrics; }
/**
 * Resets all the counters to 0 for testing.
 */
public static void resetAllCounters() {
  // Counters have no reset API; decrement each one by its current count instead.
  for (Counter counter : METRIC_REGISTRY.getCounters().values()) {
    counter.dec(counter.getCount());
  }
}
/**
 * A write timeout on an idempotent statement should be retried exactly once,
 * with the write-timeout, retry, and retry-on-write-timeout counters all at 1,
 * and the retry landing on host 2.
 */
@Test(groups = "short")
public void should_retry_on_write_timeout_if_statement_idempotent() {
  simulateError(1, write_request_timeout);
  SimpleStatement statement = new SimpleStatement("mock query");
  statement.setIdempotent(true);
  session.execute(statement);
  assertOnWriteTimeoutWasCalled(1);
  assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1);
  assertThat(errors.getRetries().getCount()).isEqualTo(1);
  assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1);
  assertQueried(1, 1);
  assertQueried(2, 1);
  assertQueried(3, 0);
}
// NOTE(review): this block is truncated/garbled — the anonymous Gauge<Integer> body opened
// after "new Gauge<Integer>() {" is cut off mid-definition, the method body never closes,
// and it references fields (recordSizeDistributionHistogram, this.queueSize) whose
// declarations are not visible here. Recover the full test from the original source before
// relying on it; left byte-identical rather than guessing at the missing code.
// Presumably it builds one of each metric type (gauge/counter/histogram/meter/timer),
// reports them, and verifies the reported values via Mockito — TODO confirm.
@Test public void testReportMetrics() { Gauge<Integer> queueSizeGauge = new Gauge<Integer>() { Counter recordsProcessedCounter = new Counter(); recordsProcessedCounter.inc(10l); recordSizeDistributionHistogram.update(3); Meter recordProcessRateMeter = new Meter(); recordProcessRateMeter.mark(1l); recordProcessRateMeter.mark(2l); recordProcessRateMeter.mark(3l); Timer totalDurationTimer = new Timer(); totalDurationTimer.update(1, TimeUnit.SECONDS); totalDurationTimer.update(2, TimeUnit.SECONDS); totalDurationTimer.update(3, TimeUnit.SECONDS); Mockito.verify(this.queueSize).setValue(1000); recordsProcessedCounter.inc(5l); recordSizeDistributionHistogram.update(4); recordProcessRateMeter.mark(4l);
// NOTE(review): this fragment starts mid-method (no signature is visible in this chunk).
// The original text was missing an "Assert.assertEquals(" before the testCounter0 check —
// restored here to match the parallel meter/timer assertions that follow.
// Verifies the reporter is a singleton, then registers one counter/meter/timer and checks
// that each records exactly one event.
Object secondInitInstance = HelixKafkaMirrorMakerMetricsReporter.get();
Assert.assertTrue(firstInitInstance == secondInitInstance);
Counter testCounter0 = new Counter();
Meter testMeter0 = new Meter();
Timer testTimer0 = new Timer();
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testCounter0", testCounter0);
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testMeter0", testMeter0);
HelixKafkaMirrorMakerMetricsReporter.get().registerMetric("testTimer0", testTimer0);
testCounter0.inc();
testMeter0.mark();
Context context = testTimer0.time();
context.stop();
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getCounters().get("testCounter0")
        .getCount(), 1);
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getMeters().get("testMeter0")
        .getCount(), 1);
Assert.assertEquals(
    HelixKafkaMirrorMakerMetricsReporter.get().getRegistry().getTimers().get("testTimer0")
        .getCount(), 1);
/**
 * Verifies that tags passed to {@code OutputStreamReporter.Factory} appear in the
 * reported output, and that the counter's "count" sub-metric is emitted.
 *
 * <p>Fix: the reporter was only closed on the success path; if any assertion threw,
 * it leaked. The close now happens in a finally block.
 */
@Test
public void testTags() throws IOException {
  MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName()).build();
  Counter counter = metricContext.counter("com.linkedin.example.counter");
  Map<String, String> tags = new HashMap<>();
  tags.put("testKey", "testValue");
  tags.put("key2", "value2");
  OutputStreamReporter reporter =
      OutputStreamReporter.Factory.newBuilder().withTags(tags).outputTo(this.stream)
          .build(new Properties());
  try {
    counter.inc();
    reporter.report();
    Assert.assertTrue(this.stream.toString().contains("key2=value2"));
    Assert.assertTrue(this.stream.toString().contains("testKey=testValue"));
    String[] lines = this.stream.toString().split("\n");
    Map<String, Set<String>> expected = new HashMap<>();
    expectMetrics(expected, lines);
    Set<String> counterSubMetrics = new HashSet<>();
    counterSubMetrics.add("count");
    expected.put("com.linkedin.example.counter", counterSubMetrics);
  } finally {
    // Always release the reporter, even when an assertion above fails.
    reporter.close();
  }
}
/**
 * Instruments a request: tracks the number of in-flight requests, times the whole
 * request, and — once the response completes — marks a meter keyed by the response
 * status code.
 *
 * @param req the incoming request (provides the {@link MetricRegistry})
 * @param rsp the response whose completion triggers the bookkeeping
 * @throws Throwable propagated from downstream handling
 */
@Override
public void handle(final Request req, final Response rsp) throws Throwable {
  MetricRegistry registry = req.require(MetricRegistry.class);
  Counter activeRequests = registry.counter("request.actives");
  Timer.Context requestTimer = registry.timer("request").time();
  activeRequests.inc();
  rsp.complete((ereq, ersp, x) -> {
    requestTimer.stop();
    activeRequests.dec();
    // Status defaults to OK when the response carries none.
    registry.meter("responses." + rsp.status().orElse(Status.OK).value()).mark();
  });
}
@Test public void testWithTags() throws IOException { try ( MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter") .addTag(new Tag<String>("taskId", "task_testjob_123")) .addTag(new Tag<String>("forkBranchName", "fork_1")).build(); InfluxDBReporter influxDBReporter = InfluxDBReporter.Factory.newBuilder() .withInfluxDBPusher(influxDBPusher) .withMetricContextName(CONTEXT_NAME) .build(new Properties());) { Counter counter = metricContext.counter(MetricRegistry.name(METRIC_PREFIX, COUNTER)); counter.inc(5l); influxDBReporter.report(new TreeMap<String, Gauge>(), metricContext.getCounters(), new TreeMap<String, Histogram>(), new TreeMap<String, Meter>(), new TreeMap<String, Timer>(), metricContext.getTagMap()); //InfluxDB converts all values to float64 internally Assert.assertEquals(getMetricValue("task_testjob_123.fork_1." + METRIC_PREFIX, COUNTER, Measurements.COUNT), Float.toString(5f)); } }
/**
 * Verifies dataset-name filtering: events whose dataset URN matches the accept
 * pattern ("^accept.*") produce one job spec; non-matching events produce none
 * and bump the rejected-events counter.
 */
@Test
public void testFilterByName() throws Exception {
  SLAEventKafkaJobMonitor monitor = new SLAEventKafkaJobMonitor("topic", null, new URI("/base/URI"),
      HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)),
      new NoopSchemaVersionWriter(), Optional.<Pattern>absent(),
      Optional.of(Pattern.compile("^accept.*")), this.templateURI,
      ImmutableMap.<String, String>of());
  monitor.buildMetricsContextAndMetrics();

  // Matching event: one spec parsed, nothing rejected.
  GobblinTrackingEvent acceptedEvent =
      createSLAEvent("acceptthis", new URI("/data/myDataset"), Maps.<String, String>newHashMap());
  Collection<Either<JobSpec, URI>> parsedSpecs = monitor.parseJobSpec(acceptedEvent);
  Assert.assertEquals(parsedSpecs.size(), 1);
  Assert.assertEquals(monitor.getRejectedEvents().getCount(), 0);

  // Non-matching event: no spec, one rejection.
  GobblinTrackingEvent rejectedEvent = createSLAEvent("donotacceptthis", new URI("/data/myDataset"),
      Maps.<String, String>newHashMap());
  parsedSpecs = monitor.parseJobSpec(rejectedEvent);
  Assert.assertEquals(parsedSpecs.size(), 0);
  Assert.assertEquals(monitor.getRejectedEvents().getCount(), 1);

  monitor.shutdownMetrics();
}
@Nullable private Message postProcessMessage(RawMessage raw, Codec codec, String inputIdOnCurrentNode, String baseMetricName, Message message, long decodeTime) { if (message == null) { metricRegistry.meter(name(baseMetricName, "failures")).mark(); return null; metricRegistry.meter(name(baseMetricName, "incomplete")).mark(); if (LOG.isDebugEnabled()) { LOG.debug("Dropping incomplete message {} on input <{}>. Parsed fields: [{}]", metricRegistry.timer(name(baseMetricName, "parseTime")).update(decodeTime, TimeUnit.NANOSECONDS); metricRegistry.meter(name(baseMetricName, "processedMessages")).mark(); decodedTrafficCounter.inc(message.getSize()); return message;
/**
 * Runs {@code impl} under metrics instrumentation: increments the call counter,
 * times the operation, and increments the exception counter when a
 * {@link RuntimeException} escapes (which is then rethrown).
 *
 * @param opName name of the operation, used as the metric name component
 * @param impl the operation to execute; invoked with a {@code null} argument
 * @return whatever {@code impl} returns
 */
private <T> T runWithMetrics(String opName, Function<Void, T> impl) {
  Preconditions.checkNotNull(opName);
  Preconditions.checkNotNull(impl);
  final MetricManager metricManager = MetricManager.INSTANCE;
  metricManager.getCounter(metricsPrefix, opName, M_CALLS).inc();
  final Timer.Context timerContext = metricManager.getTimer(metricsPrefix, opName, M_TIME).time();
  try {
    return impl.apply(null);
  } catch (RuntimeException e) {
    metricManager.getCounter(metricsPrefix, opName, M_EXCEPTIONS).inc();
    throw e;
  } finally {
    // Stop the timer on every exit path, successful or not.
    timerContext.stop();
  }
}
}
@Test(groups = "short") public void should_ignore_read_timeout() { simulateError(1, read_request_timeout); ResultSet rs = query(); assertThat(rs.iterator().hasNext()).isFalse(); // ignore decisions produce empty result sets assertOnReadTimeoutWasCalled(1); assertThat(errors.getIgnores().getCount()).isEqualTo(1); assertThat(errors.getRetries().getCount()).isEqualTo(0); assertThat(errors.getIgnoresOnReadTimeout().getCount()).isEqualTo(1); assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); assertQueried(1, 1); assertQueried(2, 0); assertQueried(3, 0); }
// NOTE(review): this method is truncated at the end of this chunk — the try body after the
// leader check is cut off, the if/try/method braces never close, and the rebalance logic
// that presumably precedes _rebalanceRate.mark() is missing. Recover the full method from
// the original source; left byte-identical rather than guessing at the missing code.
// What IS visible: times the rebalance via _rebalanceTimer, adjusts _numLiveInstances to the
// current live-instance count, and bails out early when this node is not the Helix leader.
public synchronized void rebalanceCurrentCluster(List<LiveInstance> liveInstances) { Context context = _rebalanceTimer.time(); LOGGER.info("AutoRebalanceLiveInstanceChangeListener.onLiveInstanceChange() wakes up!"); try { _numLiveInstances.inc(liveInstances.size() - _numLiveInstances.getCount()); if (!_helixManager.isLeader()) { LOGGER.info("Not leader, do nothing!"); _rebalanceRate.mark(); } finally { context.close();