public static void initialize() {
    final MetricConfig metricConfig = new MetricConfig()
        .samples(100)
        .timeWindow(1000, TimeUnit.MILLISECONDS);
    final List<MetricsReporter> reporters = new ArrayList<>();
    reporters.add(new JmxReporter("io.confluent.ksql.metrics"));
    // Replace all static contents other than Time to ensure they are cleaned up for tests that
    // are not aware of the need to initialize/clean up this class, in case test processes are
    // reused. Tests aware of the class clean everything up properly; a full, fresh instantiation
    // here ensures that something like KsqlEngineMetricsTest running after another test that used
    // MetricsCollector without running cleanUp will still behave correctly.
    metrics = new Metrics(metricConfig, reporters, new SystemTime());
    collectorMap = new ConcurrentHashMap<>();
}
/**
 * @return true if the sensor's record level indicates that the metric will be recorded, false otherwise
 */
public boolean shouldRecord() {
    return this.recordingLevel.shouldRecord(config.recordLevel().id);
}
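// Hedged usage sketch (not taken from the snippet above): shouldRecord() is what makes a
// DEBUG-level sensor a cheap no-op when the Metrics config only records at INFO level.
// The sensor name "debug-only" and the recorded value are invented for illustration.
Metrics metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO));
Sensor debugSensor = metrics.sensor("debug-only", Sensor.RecordingLevel.DEBUG);
System.out.println(debugSensor.shouldRecord()); // false: DEBUG is not enabled at INFO level
debugSensor.record(42.0);                       // effectively a no-op, nothing is measured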
public static void main(String[] args) {
    Map<String, String> metricTags = Collections.singletonMap("client-id", "client-id");
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    Metrics metrics = new Metrics(metricConfig);
    ProducerMetrics metricsRegistry = new ProducerMetrics(metrics);
    System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates()));
}
public boolean isComplete(long timeMs, MetricConfig config) {
    return timeMs - lastWindowMs >= config.timeWindowMs() || eventCount >= config.eventWindow();
}
protected void purgeObsoleteSamples(MetricConfig config, long now) {
    long expireAge = config.samples() * config.timeWindowMs();
    for (Sample sample : samples) {
        if (now - sample.lastWindowMs >= expireAge)
            sample.reset(now);
    }
}
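// Rough worked example with assumed values (not from the snippet above): the expiry age is simply
// samples * timeWindowMs, so with two 30-second windows any sample whose window started more than
// 60 seconds before `now` is reset and stops contributing to the stat.
MetricConfig cfg = new MetricConfig().samples(2).timeWindow(30, TimeUnit.SECONDS);
long expireAge = cfg.samples() * cfg.timeWindowMs(); // 2 * 30_000 = 60_000 ms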
        Collections.singletonMap(AdminClientConfig.CLIENT_ID_CONFIG, clientId));
Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
MetricConfig metricConfig = new MetricConfig()
        .samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG))
        .timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
        .recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG)))
        .tags(metricTags);
reporters.add(new JmxReporter(JMX_PREFIX));
metrics = new Metrics(metricConfig, reporters, time);
@Before
public void setup() {
    config = new MetricConfig().eventWindow(50).samples(2);
    time = new MockTime();
    metrics = new Metrics(config, Arrays.asList((MetricsReporter) new JmxReporter()), time, true);
}
final Metrics metrics = new Metrics(new MetricConfig()
        .quota(Quota.upperBound(Double.MAX_VALUE))
        .timeWindow(1, TimeUnit.MILLISECONDS)
        .samples(100));
final Sensor sensor = metrics.sensor("sensor");
/**
 * Create a metrics repository with no metric reporters and default configuration.
 * Expiration of Sensors is disabled.
 */
public Metrics() {
    this(new MetricConfig());
}
@Test
public void testRateWindowing() throws Exception {
    MetricConfig cfg = new MetricConfig().samples(3);
    Sensor s = metrics.sensor("test.sensor", cfg);
    MetricName rateMetricName = metrics.metricName("test.rate", "grp1");
    int count = cfg.samples() - 1;
    // ... (this excerpt elides the recording loop and the lookup of totalMetric,
    //      rateMetric and countRateMetric; sum is the total of the recorded values)

    time.sleep(cfg.timeWindowMs());
    assertEquals(sum, (Double) totalMetric.metricValue(), EPS);

    time.sleep(cfg.timeWindowMs() / 2);
    double elapsedSecs = (cfg.timeWindowMs() * (cfg.samples() - 1) + cfg.timeWindowMs() / 2) / 1000.0;
    // ...

    // once enough time passes for every sample to expire, the windowed rates drop back to 0
    time.sleep(cfg.timeWindowMs() * cfg.samples());
    assertEquals(0, (Double) rateMetric.metricValue(), EPS);
    assertEquals(0, (Double) countRateMetric.metricValue(), EPS);
@Test
public void testShouldRecord() {
    MetricConfig debugConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.DEBUG);
    MetricConfig infoConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO);

    Sensor infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());
    infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG);
    assertTrue(infoSensor.shouldRecord());

    Sensor debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(debugSensor.shouldRecord());
    debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG);
    assertFalse(debugSensor.shouldRecord());
}
@Test
public void testQuotas() {
    Sensor sensor = metrics.sensor("test");
    sensor.add(metrics.metricName("test1.total", "grp1"), new Total(), new MetricConfig().quota(Quota.upperBound(5.0)));
    sensor.add(metrics.metricName("test2.total", "grp1"), new Total(), new MetricConfig().quota(Quota.lowerBound(0.0)));
    sensor.record(5.0);
    try {
        sensor.record(1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
    assertEquals(6.0, (Double) metrics.metrics().get(metrics.metricName("test1.total", "grp1")).metricValue(), EPS);
    sensor.record(-6.0);
    try {
        sensor.record(-1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
}
private Sample advance(MetricConfig config, long timeMs) {
    this.current = (this.current + 1) % config.samples();
    if (this.current >= samples.size()) {
        Sample sample = newSample(timeMs);
        this.samples.add(sample);
        return sample;
    } else {
        Sample sample = current(timeMs);
        sample.reset(timeMs);
        return sample;
    }
}
/**
 * Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric
 * configuration. A tag in {@code tags} takes precedence if the same tag key is also specified in the default metric
 * configuration.
 *
 * @param name        The name of the metric
 * @param group       logical group name of the metrics to which this metric belongs
 * @param description A human-readable description to include in the metric
 * @param tags        additional key/value attributes of the metric
 */
public MetricName metricName(String name, String group, String description, Map<String, String> tags) {
    Map<String, String> combinedTag = new LinkedHashMap<>(config.tags());
    combinedTag.putAll(tags);
    return new MetricName(name, group, description, combinedTag);
}
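// Hedged usage sketch of the tag merging described above; the tag keys and values are invented
// for illustration. A tag passed to metricName() overrides a default tag with the same key from
// the MetricConfig.
Metrics metrics = new Metrics(new MetricConfig()
        .tags(Collections.singletonMap("client-id", "default-client")));
MetricName name = metrics.metricName("request-rate", "example-group", "requests per second",
        Collections.singletonMap("client-id", "producer-1"));
// name.tags() now maps "client-id" to "producer-1": the explicit tag wins over the config default.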
public void checkQuotas(long timeMs) {
    for (KafkaMetric metric : this.metrics.values()) {
        MetricConfig config = metric.config();
        if (config != null) {
            Quota quota = config.quota();
            if (quota != null) {
                double value = metric.measurableValue(timeMs);
                if (!quota.acceptable(value)) {
                    throw new QuotaViolationException(metric.metricName(), value, quota.bound());
                }
            }
        }
    }
}
@Override
public long windowSize(MetricConfig config, long now) {
    stat.purgeObsoleteSamples(config, now);
    long elapsed = now - stat.oldest(now).lastWindowMs;
    return elapsed < config.timeWindowMs() ? config.timeWindowMs() : elapsed;
}
MetricConfig metricConfig = new MetricConfig()
        .samples(config.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG))
        .timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
        .recordLevel(Sensor.RecordingLevel.forName(config.getString(ProducerConfig.METRICS_RECORDING_LEVEL_CONFIG)))
        .tags(metricTags);
List<MetricsReporter> reporters = config.getConfiguredInstances(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class,
@Test
public void testEventWindowing() {
    Count count = new Count();
    MetricConfig config = new MetricConfig().eventWindow(1).samples(2);
    count.record(config, 1.0, time.milliseconds());
    count.record(config, 1.0, time.milliseconds());
    assertEquals(2.0, count.measure(config, time.milliseconds()), EPS);
    count.record(config, 1.0, time.milliseconds()); // first event times out
    assertEquals(2.0, count.measure(config, time.milliseconds()), EPS);
}
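// Why the count stays at 2.0 above: with eventWindow(1) and samples(2) the stat keeps at most two
// one-event windows, so recording a third event wraps the sample ring buffer (see advance() in an
// earlier snippet) and resets the oldest window before the new value lands. A hedged sketch with
// assumed values, reusing the mocked `time` from the test fixture:
MetricConfig wider = new MetricConfig().eventWindow(2).samples(2);
Count all = new Count();
for (int i = 0; i < 4; i++)
    all.record(wider, 1.0, time.milliseconds());
// four events fit into two 2-event windows, so this would measure 4.0
assertEquals(4.0, all.measure(wider, time.milliseconds()), EPS);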
/**
 * Create a metrics repository with no metric reporters and default configuration.
 * Expiration of Sensors is disabled.
 */
public Metrics(Time time) {
    this(new MetricConfig(), new ArrayList<MetricsReporter>(0), time);
}
public long windowSize(MetricConfig config, long now) {
    // purge old samples before we compute the window size
    stat.purgeObsoleteSamples(config, now);

    /*
     * Here we check the total amount of time elapsed since the oldest non-obsolete window.
     * This gives the total window size of the batch, which is the time used for the Rate computation.
     * However, there is an issue if we do not have sufficient data: for example, if only 1 second has
     * elapsed in a 30-second window, the measured rate will be very high.
     * Hence we assume that the elapsed time is always N-1 complete windows plus whatever fraction of
     * the final window is complete.
     *
     * Note that we could simply count the amount of time elapsed in the current window and add n-1
     * windows to get the total time, but this approach does not account for sleeps. SampledStat only
     * creates samples when record is called; if record is not called for a period of time, that time
     * is not accounted for in windowSize and produces incorrect results.
     */
    long totalElapsedTimeMs = now - stat.oldest(now).lastWindowMs;
    // Check how many full windows of data we have currently retained
    int numFullWindows = (int) (totalElapsedTimeMs / config.timeWindowMs());
    int minFullWindows = config.samples() - 1;

    // If the available windows are fewer than the minimum required, add the difference to the total elapsed time
    if (numFullWindows < minFullWindows)
        totalElapsedTimeMs += (minFullWindows - numFullWindows) * config.timeWindowMs();

    return totalElapsedTimeMs;
}
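// Worked numbers for the padding above (assumed config, not from the source): samples(3) and a
// 30-second time window give minFullWindows = 2. If only 10 seconds have elapsed since the oldest
// retained sample, numFullWindows = 0 and the window is padded to 10_000 + 2 * 30_000 = 70_000 ms,
// so a rate computed over it divides by 70 s rather than by a misleadingly small 10 s.
MetricConfig cfg = new MetricConfig().samples(3).timeWindow(30, TimeUnit.SECONDS);
long totalElapsedTimeMs = 10_000L;                                    // time since the oldest sample
int numFullWindows = (int) (totalElapsedTimeMs / cfg.timeWindowMs()); // 0
int minFullWindows = cfg.samples() - 1;                               // 2
if (numFullWindows < minFullWindows)
    totalElapsedTimeMs += (minFullWindows - numFullWindows) * cfg.timeWindowMs();
// totalElapsedTimeMs == 70_000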