/**
 * Builds a {@link MetricName} for the given name, group and description, merging the
 * default tags from the metric configuration with the caller-supplied tags. When the
 * same tag key appears in both, the caller-supplied value wins.
 *
 * @param name        the name of the metric
 * @param group       logical group name of the metrics to which this metric belongs
 * @param description a human-readable description to include in the metric
 * @param tags        additional key/value attributes of the metric
 * @return a new {@link MetricName} carrying the combined tag map
 */
public MetricName metricName(String name, String group, String description, Map<String, String> tags) {
    // Start from the configured defaults, then overlay the caller's tags so they
    // take precedence on key collisions. LinkedHashMap preserves insertion order.
    Map<String, String> merged = new LinkedHashMap<>(config.tags());
    merged.putAll(tags);
    return new MetricName(name, group, description, merged);
}
/**
 * Wraps a bare metric name in a {@link MetricName} with a fixed test group,
 * description and no tags.
 *
 * @param metricName the metric's name
 * @return an untagged {@link MetricName} in group {@code "group-id"}
 */
protected MetricName name(final String metricName) {
    return new MetricName(
            metricName,
            "group-id",
            "desc",
            Collections.<String, String>emptyMap());
}
/**
 * Registers the per-partition produce sensors: one tracking the produced-record
 * rate and one tracking the produce-error rate. Both are stored in the
 * corresponding partition-indexed maps for later recording.
 *
 * @param partition the partition index to create sensors for
 */
void addPartitionSensors(int partition) {
    // Throughput sensor for this partition.
    Sensor produced = metrics.sensor("records-produced-partition-" + partition);
    produced.add(
        new MetricName("records-produced-rate-partition-" + partition, METRIC_GROUP_NAME,
            "The average number of records per second that are produced to this partition", _tags),
        new Rate());
    _recordsProducedPerPartition.put(partition, produced);

    // Error-rate sensor for this partition.
    Sensor errors = metrics.sensor("produce-error-partition-" + partition);
    errors.add(
        new MetricName("produce-error-rate-partition-" + partition, METRIC_GROUP_NAME,
            "The average number of errors per second when producing to this partition", _tags),
        new Rate());
    _produceErrorPerPartition.put(partition, errors);
  }
}
/**
 * Creates a consumer-side sensor keyed by {@code key} and {@code id}, attaches the
 * supplied stat to it, and wraps it in a {@link TopicSensors.SensorMetric} whose
 * {@code record} hook feeds each observed object through {@code recordValue} before
 * delegating to the base bookkeeping.
 *
 * @param key              tag value identifying what the sensor measures
 * @param metricNameString the Kafka metric name
 * @param stat             the measurable stat attached to the sensor
 * @param recordValue      extracts the numeric value to record from each object
 * @return the wrapped sensor/metric pair (error flag set to {@code true})
 */
private SensorMetric<Object> buildSensor(
    final String key,
    final String metricNameString,
    final MeasurableStat stat,
    final Function<Object, Double> recordValue
) {
  final String sensorName = "sec-" + key + "-" + metricNameString + "-" + id;
  final MetricName metricName = new MetricName(
      metricNameString,
      "consumer-metrics",
      "consumer-" + sensorName,
      ImmutableMap.of("key", key, "id", id));

  final Sensor sensor = metrics.sensor(sensorName);
  sensor.add(metricName, stat);

  final KafkaMetric metric = metrics.metrics().get(metricName);
  return new TopicSensors.SensorMetric<Object>(sensor, metric, time, true) {
    void record(final Object o) {
      // Record the extracted value first, then let the base class do its bookkeeping.
      sensor.record(recordValue.apply(o));
      super.record(o);
    }
  };
}
// NOTE(review): fragment of produce-metrics setup — registers throughput (rate/total),
// error (rate/total) and delay (avg/max plus 99/99.9/99.99th-percentile buckets) stats,
// then begins registering an availability gauge whose anonymous Measurable body
// continues beyond this snippet. sizeInBytes = 4 * bucketNum presumably sizes the
// constant-width percentile histogram — TODO confirm against Percentiles' contract.
_recordsProduced.add(new MetricName("records-produced-rate", METRIC_GROUP_NAME, "The average number of records per second that are produced", tags), new Rate()); _recordsProduced.add(new MetricName("records-produced-total", METRIC_GROUP_NAME, "The total number of records that are produced", tags), new Total()); _produceError.add(new MetricName("produce-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate()); _produceError.add(new MetricName("produce-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total()); _produceDelay.add(new MetricName("produce-delay-ms-avg", METRIC_GROUP_NAME, "The average delay in ms for produce request", tags), new Avg()); _produceDelay.add(new MetricName("produce-delay-ms-max", METRIC_GROUP_NAME, "The maximum delay in ms for produce request", tags), new Max()); int sizeInBytes = 4 * bucketNum; _produceDelay.add(new Percentiles(sizeInBytes, _latencyPercentileMaxMs, Percentiles.BucketSizing.CONSTANT, new Percentile(new MetricName("produce-delay-ms-99th", METRIC_GROUP_NAME, "The 99th percentile delay in ms for produce request", tags), 99.0), new Percentile(new MetricName("produce-delay-ms-999th", METRIC_GROUP_NAME, "The 99.9th percentile delay in ms for produce request", tags), 99.9), new Percentile(new MetricName("produce-delay-ms-9999th", METRIC_GROUP_NAME, "The 99.99th percentile delay in ms for produce request", tags), 99.99))); metrics.addMetric(new MetricName("produce-availability-avg", METRIC_GROUP_NAME, "The average produce availability", tags), new Measurable() { @Override
private void addSensor( final String key, final String metricNameString, final MeasurableStat stat, final List<TopicSensors.SensorMetric<ProducerRecord>> results ) { final String name = "prod-" + key + "-" + metricNameString + "-" + id; final MetricName metricName = new MetricName( metricNameString, "producer-metrics", "producer-" + name, ImmutableMap.of("key", key, "id", id) ); final Sensor existingSensor = metrics.getSensor(name); final Sensor sensor = metrics.sensor(name); // either a new sensor or a new metric with different id if (existingSensor == null || metrics.metrics().get(metricName) == null) { sensor.add(metricName, stat); } final KafkaMetric metric = metrics.metrics().get(metricName); results.add(new TopicSensors.SensorMetric<ProducerRecord>(sensor, metric, time, false) { void record(final ProducerRecord record) { sensor.record(1); super.record(record); } }); }
// NOTE(review): fragment of the ConsumeMetrics constructor — registers consume-side
// sensors: bytes/records consumed (rate/total), errors, duplicates, lost and delayed
// records, record delay (avg/max plus 99/99.9/99.99th-percentile buckets), then begins
// registering an availability gauge whose anonymous Measurable body continues beyond
// this snippet. The first string literal below is split across the original source
// lines mid-literal; code left byte-identical.
public ConsumeMetrics(final Metrics metrics, final Map<String, String> tags) { _bytesConsumed = metrics.sensor("bytes-consumed"); _bytesConsumed.add(new MetricName("bytes-consumed-rate", METRIC_GROUP_NAME, "The average number of bytes per second that are consumed", tags), new Rate()); _consumeError.add(new MetricName("consume-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate()); _consumeError.add(new MetricName("consume-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total()); _recordsConsumed.add(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, "The average number of records per second that are consumed", tags), new Rate()); _recordsConsumed.add(new MetricName("records-consumed-total", METRIC_GROUP_NAME, "The total number of records that are consumed", tags), new Total()); _recordsDuplicated.add(new MetricName("records-duplicated-rate", METRIC_GROUP_NAME, "The average number of records per second that are duplicated", tags), new Rate()); _recordsDuplicated.add(new MetricName("records-duplicated-total", METRIC_GROUP_NAME, "The total number of records that are duplicated", tags), new Total()); _recordsLost.add(new MetricName("records-lost-rate", METRIC_GROUP_NAME, "The average number of records per second that are lost", tags), new Rate()); _recordsLost.add(new MetricName("records-lost-total", METRIC_GROUP_NAME, "The total number of records that are lost", tags), new Total()); _recordsDelayed.add(new MetricName("records-delayed-rate", METRIC_GROUP_NAME, "The average number of records per second that are either lost or arrive after maximum allowed latency under SLA", tags), new Rate()); _recordsDelayed.add(new MetricName("records-delayed-total", METRIC_GROUP_NAME, "The total number of records that are either lost or arrive after maximum allowed latency under SLA", tags), new Total()); _recordsDelay.add(new MetricName("records-delay-ms-avg", METRIC_GROUP_NAME, "The average latency of records 
from producer to consumer", tags), new Avg()); _recordsDelay.add(new MetricName("records-delay-ms-max", METRIC_GROUP_NAME, "The maximum latency of records from producer to consumer", tags), new Max()); int sizeInBytes = 4 * bucketNum; _recordsDelay.add(new Percentiles(sizeInBytes, _latencyPercentileMaxMs, Percentiles.BucketSizing.CONSTANT, new Percentile(new MetricName("records-delay-ms-99th", METRIC_GROUP_NAME, "The 99th percentile latency of records from producer to consumer", tags), 99.0), new Percentile(new MetricName("records-delay-ms-999th", METRIC_GROUP_NAME, "The 99.9th percentile latency of records from producer to consumer", tags), 99.9), new Percentile(new MetricName("records-delay-ms-9999th", METRIC_GROUP_NAME, "The 99.99th percentile latency of records from producer to consumer", tags), 99.99))); metrics.addMetric(new MetricName("consume-availability-avg", METRIC_GROUP_NAME, "The average consume availability", tags), new Measurable() {
private void addSensor( final String key, final String metricNameString, final MeasurableStat stat, final List<TopicSensors.SensorMetric<ConsumerRecord>> sensors, final boolean isError, final Function<ConsumerRecord, Double> recordValue ) { final String name = "cons-" + key + "-" + metricNameString + "-" + id; final MetricName metricName = new MetricName( metricNameString, "consumer-metrics", "consumer-" + name, ImmutableMap.of("key", key, "id", id) ); final Sensor existingSensor = metrics.getSensor(name); final Sensor sensor = metrics.sensor(name); // re-use the existing measurable stats to share between consumers if (existingSensor == null || metrics.metrics().get(metricName) == null) { sensor.add(metricName, stat); } final KafkaMetric metric = metrics.metrics().get(metricName); sensors.add(new TopicSensors.SensorMetric<ConsumerRecord>(sensor, metric, time, isError) { void record(final ConsumerRecord record) { sensor.record(recordValue.apply(record)); super.record(record); } }); }
// NOTE(review): truncated test fragment — looks up the coordinator's
// "assigned-partitions" metric by its untagged MetricName; the body continues
// beyond this snippet. Code left byte-identical.
@Test public void testThreadSafeAssignedPartitionsMetric() throws Exception { final Metric metric = metrics.metric(new MetricName("assigned-partitions", "consumer" + groupId + "-coordinator-metrics", "", Collections.<String, String>emptyMap()));
// NOTE(review): truncated test fragment — builds a Meter from a rate and a total
// MetricName and fetches its component stats; the assertions that presumably follow
// are beyond this snippet. Code left byte-identical.
@Test public void testMeter() { Map<String, String> emptyTags = Collections.emptyMap(); MetricName rateMetricName = new MetricName("rate", "test", "", emptyTags); MetricName totalMetricName = new MetricName("total", "test", "", emptyTags); Meter meter = new Meter(rateMetricName, totalMetricName); List<NamedMeasurable> stats = meter.stats();
/**
 * Verifies sensor expiration: while a sensor is within its inactivity window it
 * accepts new metrics and meters, and after the window elapses without activity it
 * rejects further additions.
 */
@Test
public void testExpiredSensor() {
    final MetricConfig config = new MetricConfig();
    final Time mockTime = new MockTime();
    final Metrics metrics =
        new Metrics(config, Arrays.asList((MetricsReporter) new JmxReporter()), mockTime, true);

    final long expirationSeconds = 60L;
    final Sensor sensor = new Sensor(metrics, "sensor", null, config, mockTime,
        expirationSeconds, Sensor.RecordingLevel.INFO);

    // While the sensor is fresh, registration succeeds.
    assertTrue(sensor.add(metrics.metricName("test1", "grp1"), new Avg()));

    final Map<String, String> noTags = Collections.emptyMap();
    final Meter meter = new Meter(
        new MetricName("rate", "test", "", noTags),
        new MetricName("total", "test", "", noTags));
    assertTrue(sensor.add(meter));

    // Advance past the inactivity window; the expired sensor now rejects additions.
    mockTime.sleep(TimeUnit.SECONDS.toMillis(expirationSeconds + 1));
    assertFalse(sensor.add(metrics.metricName("test3", "grp1"), new Avg()));
    assertFalse(sensor.add(meter));

    metrics.close();
}
/**
 * Exposes the Kafka producer's average compression rate as a Hermes gauge.
 *
 * @param producer the Kafka producer whose metric is read
 * @param metrics  the Hermes metrics registry to register the gauge in
 * @param gauge    the Hermes-side gauge name
 */
private void registerCompressionRateGauge(Producer<byte[], byte[]> producer, HermesMetrics metrics, String gauge) {
    MetricName metricName = new MetricName(
            "compression-rate-avg", "producer-metrics", "average compression rate", Collections.emptyMap());
    registerProducerGauge(producer, metrics, metricName, gauge);
}
/**
 * Exposes the Kafka producer's available buffer bytes as a Hermes gauge.
 *
 * @param producer the Kafka producer whose metric is read
 * @param metrics  the Hermes metrics registry to register the gauge in
 * @param gauge    the Hermes-side gauge name
 */
private void registerAvailableBytesGauge(Producer<byte[], byte[]> producer, HermesMetrics metrics, String gauge) {
    MetricName metricName = new MetricName(
            "buffer-available-bytes", "producer-metrics", "buffer available bytes", Collections.emptyMap());
    registerProducerGauge(producer, metrics, metricName, gauge);
}
/**
 * Exposes the Kafka producer's total buffer bytes as a Hermes gauge.
 *
 * @param producer the Kafka producer whose metric is read
 * @param metrics  the Hermes metrics registry to register the gauge in
 * @param gauge    the Hermes-side gauge name
 */
private void registerTotalBytesGauge(Producer<byte[], byte[]> producer, HermesMetrics metrics, String gauge) {
    MetricName metricName = new MetricName(
            "buffer-total-bytes", "producer-metrics", "buffer total bytes", Collections.emptyMap());
    registerProducerGauge(producer, metrics, metricName, gauge);
}
private void populateCurrentLags(Set<SystemStreamPartition> ssps) { Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics(); // populate the MetricNames first time if (perPartitionMetrics.isEmpty()) { HashMap<String, String> tags = new HashMap<>(); tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics for (SystemStreamPartition ssp : ssps) { TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp); perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags)); } } for (SystemStreamPartition ssp : ssps) { MetricName mn = perPartitionMetrics.get(ssp); Metric currentLagMetric = consumerMetrics.get(mn); // High watermark is fixed to be the offset of last available message, // so the lag is now at least 0, which is the same as Samza's definition. // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling. long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L; latestLags.put(ssp, currentLag); // calls the setIsAtHead for the BlockingEnvelopeMap sink.setIsAtHighWatermark(ssp, currentLag == 0); } }
private void populateCurrentLags(Set<SystemStreamPartition> ssps) { Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics(); // populate the MetricNames first time if (perPartitionMetrics.isEmpty()) { HashMap<String, String> tags = new HashMap<>(); tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics for (SystemStreamPartition ssp : ssps) { TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp); perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags)); } } for (SystemStreamPartition ssp : ssps) { MetricName mn = perPartitionMetrics.get(ssp); Metric currentLagMetric = consumerMetrics.get(mn); // High watermark is fixed to be the offset of last available message, // so the lag is now at least 0, which is the same as Samza's definition. // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling. long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L; latestLags.put(ssp, currentLag); // calls the setIsAtHead for the BlockingEnvelopeMap sink.setIsAtHighWatermark(ssp, currentLag == 0); } }
private void populateCurrentLags(Set<SystemStreamPartition> ssps) { Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics(); // populate the MetricNames first time if (perPartitionMetrics.isEmpty()) { HashMap<String, String> tags = new HashMap<>(); tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics for (SystemStreamPartition ssp : ssps) { TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp); perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags)); } } for (SystemStreamPartition ssp : ssps) { MetricName mn = perPartitionMetrics.get(ssp); Metric currentLagMetric = consumerMetrics.get(mn); // High watermark is fixed to be the offset of last available message, // so the lag is now at least 0, which is the same as Samza's definition. // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling. long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L; latestLags.put(ssp, currentLag); // calls the setIsAtHead for the BlockingEnvelopeMap sink.setIsAtHighWatermark(ssp, currentLag == 0); } }
/**
 * Attaches average- and maximum-latency metrics for the given operation to a sensor.
 *
 * @param sensor    the sensor to attach the metrics to
 * @param group     the metric group name
 * @param tags      key/value attributes of the metrics
 * @param operation the operation name used as the metric-name prefix
 */
public static void addAvgMaxLatency(final Sensor sensor,
                                    final String group,
                                    final Map<String, String> tags,
                                    final String operation) {
    final MetricName avgName = new MetricName(
        operation + "-latency-avg",
        group,
        "The average latency of " + operation + " operation.",
        tags);
    sensor.add(avgName, new Avg());

    final MetricName maxName = new MetricName(
        operation + "-latency-max",
        group,
        "The max latency of " + operation + " operation.",
        tags);
    sensor.add(maxName, new Max());
}
/**
 * Creates the Streams metrics facade for the given thread and registers the
 * skipped-records rate and total metrics on a thread-level INFO sensor.
 *
 * @param metrics    the underlying Kafka metrics registry; must not be null
 * @param threadName the owning thread's name, used for tagging
 */
public StreamsMetricsImpl(final Metrics metrics, final String threadName) {
    Objects.requireNonNull(metrics, "Metrics cannot be null");
    this.threadName = threadName;
    this.metrics = metrics;
    this.parentSensors = new HashMap<>();

    skippedRecordsSensor = threadLevelSensor("skipped-records", Sensor.RecordingLevel.INFO);
    final String group = "stream-metrics";
    // Per-second rate of skipped records.
    skippedRecordsSensor.add(
        new MetricName("skipped-records-rate", group,
            "The average per-second number of skipped records", tagMap()),
        new Rate(TimeUnit.SECONDS, new Count()));
    // Monotonically increasing count of skipped records.
    skippedRecordsSensor.add(
        new MetricName("skipped-records-total", group,
            "The total number of skipped records", tagMap()),
        new Total());
}
/**
 * Attaches invocation-rate and cumulative-count metrics for the given operation to a
 * sensor.
 *
 * @param sensor    the sensor to attach the metrics to
 * @param group     the metric group name
 * @param tags      key/value attributes of the metrics
 * @param operation the operation name used as the metric-name prefix
 */
public static void addInvocationRateAndCount(final Sensor sensor,
                                             final String group,
                                             final Map<String, String> tags,
                                             final String operation) {
    final MetricName rateName = new MetricName(
        operation + "-rate",
        group,
        "The average number of occurrence of " + operation + " operation per second.",
        tags);
    sensor.add(rateName, new Rate(TimeUnit.SECONDS, new Count()));

    final MetricName totalName = new MetricName(
        operation + "-total",
        group,
        "The total number of occurrence of " + operation + " operations.",
        tags);
    sensor.add(totalName, new CumulativeCount());
}