/**
 * Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric
 * configuration. Tag in tags takes precedence if the same tag key is specified in the default metric configuration.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @param description A human-readable description to include in the metric
 * @param tags additional key/value attributes of the metric
 */
public MetricName metricName(String name, String group, String description, Map<String, String> tags) {
    // Seed with the configured default tags first, then overlay the caller-supplied
    // tags so an explicit entry wins whenever the same key appears in both maps.
    Map<String, String> allTags = new LinkedHashMap<>(config.tags());
    allTags.putAll(tags);
    return new MetricName(name, group, description, allTags);
}
public MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags) { // check to make sure that the runtime defined tags contain all the template tags. Set<String> runtimeTagKeys = new HashSet<>(tags.keySet()); runtimeTagKeys.addAll(config().tags().keySet()); Set<String> templateTagKeys = template.tags(); if (!runtimeTagKeys.equals(templateTagKeys)) { throw new IllegalArgumentException("For '" + template.name() + "', runtime-defined metric tags do not match the tags in the template. " + "Runtime = " + runtimeTagKeys.toString() + " Template = " + templateTagKeys.toString()); } return this.metricName(template.name(), template.group(), template.description(), tags); }
/**
 * Prints the full set of producer metric templates as an HTML table, under the
 * "kafka.producer" JMX domain, using a single default "client-id" tag.
 */
public static void main(String[] args) {
    Map<String, String> defaultTags = Collections.singletonMap("client-id", "client-id");
    MetricConfig config = new MetricConfig().tags(defaultTags);
    Metrics metrics = new Metrics(config);
    ProducerMetrics registry = new ProducerMetrics(metrics);
    System.out.println(Metrics.toHtmlTable("kafka.producer", registry.getAllTemplates()));
}
/**
 * Builds the sender-side test fixtures (metrics, accumulator, sender) around the given
 * transaction manager, optionally enforcing ordering guarantees and/or using a
 * caller-provided buffer pool instead of a freshly allocated one.
 *
 * @param transactionManager the transaction manager wired into accumulator and sender
 * @param guaranteeOrder whether the sender should guarantee message ordering
 * @param customPool buffer pool to use, or null to create a default pool
 */
private void setupWithTransactionState(TransactionManager transactionManager, boolean guaranteeOrder, BufferPool customPool) {
    long deliveryTimeoutMs = 1500L;
    long totalSize = 1024 * 1024;
    String metricGrpName = "producer-metrics";
    MetricConfig metricConfig = new MetricConfig().tags(Collections.singletonMap("client-id", CLIENT_ID));
    this.metrics = new Metrics(metricConfig, time);
    // Fall back to a default pool only when the caller did not supply one.
    BufferPool pool;
    if (customPool != null) {
        pool = customPool;
    } else {
        pool = new BufferPool(totalSize, batchSize, metrics, time, metricGrpName);
    }
    this.accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.NONE, 0L, 0L, deliveryTimeoutMs,
            metrics, metricGrpName, time, apiVersions, transactionManager, pool);
    this.senderMetricsRegistry = new SenderMetricsRegistry(this.metrics);
    this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, guaranteeOrder, MAX_REQUEST_SIZE,
            ACKS_ALL, Integer.MAX_VALUE, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
    this.client.updateMetadata(TestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2)));
}
childTagsWithValues.put("child-tag", "child-tag-value"); try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), Arrays.asList((MetricsReporter) new JmxReporter()), time, true)) { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues);
/**
 * Test fixture setup: wires a transactional sender pipeline (transaction manager,
 * metrics, record accumulator, sender) against a single-broker metadata view.
 */
@Before
public void setup() {
    Map<String, String> clientIdTags = new LinkedHashMap<>();
    clientIdTags.put("client-id", CLIENT_ID);
    int batchSize = 16 * 1024;
    long deliveryTimeoutMs = 3000L;
    long totalSize = 1024 * 1024;
    String metricGrpName = "producer-metrics";
    MetricConfig metricConfig = new MetricConfig().tags(clientIdTags);
    this.brokerNode = new Node(0, "localhost", 2211);
    this.transactionManager = new TransactionManager(logContext, transactionalId, transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS);
    Metrics metrics = new Metrics(metricConfig, time);
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(metrics);
    this.accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.NONE, 0L, 0L, deliveryTimeoutMs,
            metrics, metricGrpName, time, apiVersions, transactionManager,
            new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL,
            MAX_RETRIES, senderMetrics, this.time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
    this.client.updateMetadata(TestUtils.metadataUpdateWith(1, singletonMap("test", 2)));
}
@Test public void testSenderMetricsTemplates() throws Exception { metrics.close(); Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA"); metrics = new Metrics(new MetricConfig().tags(clientTags)); SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1, metricsRegistry, time, REQUEST_TIMEOUT, 50, null, apiVersions); // Append a message so that topic metrics are created accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT); sender.run(time.milliseconds()); // connect sender.run(time.milliseconds()); // send produce request client.respond(produceResponse(tp0, 0, Errors.NONE, 0)); sender.run(time.milliseconds()); // Create throttle time metrics Sender.throttleTimeSensor(metricsRegistry); // Verify that all metrics except metrics-count have registered templates Set<MetricNameTemplate> allMetrics = new HashSet<>(); for (MetricName n : metrics.metrics().keySet()) { if (!n.group().equals("kafka-metrics-count")) allMetrics.add(new MetricNameTemplate(n.name(), n.group(), "", n.tags().keySet())); } TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.allTemplates()), "metrics", "templates"); }
/**
 * Verifies that every metric the fetcher registers (other than the bookkeeping
 * "kafka-metrics-count" group) has a corresponding template in FetcherMetricsRegistry,
 * after normalizing per-partition metric names back to their {topic}-{partition} form.
 */
@Test
public void testFetcherMetricsTemplates() throws Exception {
    // Recreate metrics/fetcher with a known client-id tag so tag keys line up with the registry.
    metrics.close();
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    metrics = new Metrics(new MetricConfig().tags(clientTags));
    metricsRegistry = new FetcherMetricsRegistry(clientTags.keySet(), "consumer" + groupId);
    fetcher.close();
    fetcher = createFetcher(subscriptions, metrics);
    // Fetch from topic to generate topic metrics
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetcher.fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    // Create throttle metrics
    Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        // Per-partition metric names embed the concrete topic-partition; substitute the
        // template placeholder so they compare equal to the registry's templates.
        String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
.timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .recordLevel(Sensor.RecordingLevel.forName(config.getString(ProducerConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricTags); List<MetricsReporter> reporters = config.getConfiguredInstances(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class,
public SenderMetricsRegistry(Metrics metrics) { this.metrics = metrics; this.tags = this.metrics.config().tags().keySet(); this.allTemplates = new ArrayList<>();
.timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricsTags); List<MetricsReporter> reporters = config.getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class, Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId));
.timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricTags); reporters.add(new JmxReporter(JMX_PREFIX)); metrics = new Metrics(metricConfig, reporters, time);
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG)) .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .tags(metricsTags); List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class); reporters.add(new JmxReporter(JMX_PREFIX));
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG)) .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .tags(metricsTags); List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class,