/**
 * Get the full set of internal metrics maintained by the producer.
 */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return Collections.unmodifiableMap(this.metrics.metrics());
}
/**
 * Get the metrics kept by the consumer.
 */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return Collections.unmodifiableMap(this.metrics.metrics());
}
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return Collections.unmodifiableMap(this.metrics.metrics());
}
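For reference, the read-only map returned by metrics() can be consumed directly by application code. A minimal sketch, assuming a running producer or consumer client named producer (the printed format is illustrative, not part of the snippets above):

// Iterate every registered metric and print its current value.
// metricValue() returns an Object, typically a Double for measurable metrics.
producer.metrics().forEach((name, metric) ->
        System.out.printf("%s / %s = %s%n", name.group(), name.name(), metric.metricValue()));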
private KafkaMetric getMetric(String name) throws Exception {
    Optional<Map.Entry<MetricName, KafkaMetric>> metric = metrics.metrics().entrySet().stream()
            .filter(entry -> entry.getKey().name().equals(name))
            .findFirst();
    if (!metric.isPresent())
        throw new Exception(String.format("Could not find metric called %s", name));
    return metric.get().getValue();
}
public double metricValue(String name) {
    for (Map.Entry<MetricName, KafkaMetric> entry : metrics.metrics().entrySet()) {
        if (entry.getKey().name().equals(name))
            return (double) entry.getValue().metricValue();
    }
    throw new IllegalStateException("Metric not found, " + name + ", found=" + metrics.metrics().keySet());
}
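Looking a metric up by name alone, as the two helpers above do, only works when the name is unique across groups. A hedged variant that also filters on the group (the helper name and exception are illustrative, not from the snippets above):

// Hypothetical variant: resolve a metric by both name and group to avoid collisions
// between identically named metrics registered under different groups.
private KafkaMetric getMetric(String name, String group) {
    return metrics.metrics().entrySet().stream()
            .filter(e -> e.getKey().name().equals(name) && e.getKey().group().equals(group))
            .map(Map.Entry::getValue)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException("Could not find metric " + group + "/" + name));
}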
@Test
public void testRemoveMetric() {
    int size = metrics.metrics().size();
    metrics.addMetric(metrics.metricName("test1", "grp1"), new Count());
    metrics.addMetric(metrics.metricName("test2", "grp1"), new Count());

    assertNotNull(metrics.removeMetric(metrics.metricName("test1", "grp1")));
    assertNull(metrics.metrics().get(metrics.metricName("test1", "grp1")));
    assertNotNull(metrics.metrics().get(metrics.metricName("test2", "grp1")));

    assertNotNull(metrics.removeMetric(metrics.metricName("test2", "grp1")));
    assertNull(metrics.metrics().get(metrics.metricName("test2", "grp1")));

    assertEquals(size, metrics.metrics().size());
}
@Test
public void testFetchResponseMetricsPartialResponse() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1)
            expectedBytes += record.sizeInBytes();
    }

    fetchRecords(tp0, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(2, (Double) recordsCountAverage.metricValue(), EPSILON);
}
assertEquals(3, fetchedRecords.get(tp2).size());

Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
@Test
public void testFetcherMetrics() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);

    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);

    // recordsFetchLagMax should be initialized to NaN
    assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    // recordsFetchLagMax should be hw - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 0);
    assertEquals(100, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(100, (Double) partitionLag.metricValue(), EPSILON);

    // recordsFetchLagMax should be hw - offset of the last message after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 0);
    assertEquals(197, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    assertEquals(197, (Double) partitionLag.metricValue(), EPSILON);

    // verify de-registration of partition lag
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
}
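The expected lag values asserted above follow directly from the fetch parameters used in the test; a small sketch of that arithmetic (the variable names are illustrative):

// After the empty fetch: lag = high watermark - fetch offset
long highWatermark = 100L;
long fetchOffset = 0L;
long lagAfterEmptyFetch = highWatermark - fetchOffset;                   // 100

// After fetching offsets 0..2 with a new high watermark of 200:
// lag = high watermark - (last fetched offset + 1)
long newHighWatermark = 200L;
long lastFetchedOffset = 2L;
long lagAfterNonEmptyFetch = newHighWatermark - (lastFetchedOffset + 1); // 197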
MetricName partitionLeadMetric = metrics.metricName("records-lead", metricGroup, "", tags);

Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric recordsFetchLeadMin = allMetrics.get(minLeadMetric);
@Test
public void testSenderMetricsTemplates() throws Exception {
    metrics.close();
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    metrics = new Metrics(new MetricConfig().tags(clientTags));
    SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1,
            metricsRegistry, time, REQUEST_TIMEOUT, 50, null, apiVersions);

    // Append a message so that topic metrics are created
    accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT);
    sender.run(time.milliseconds()); // connect
    sender.run(time.milliseconds()); // send produce request
    client.respond(produceResponse(tp0, 0, Errors.NONE, 0));
    sender.run(time.milliseconds());

    // Create throttle time metrics
    Sender.throttleTimeSensor(metricsRegistry);

    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(n.name(), n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.allTemplates()), "metrics", "templates");
}
@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(time.timer(0));
    fetcher.fetchedRecords();

    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();

    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);

Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);
@Test
public void testFetcherMetricsTemplates() throws Exception {
    metrics.close();
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    metrics = new Metrics(new MetricConfig().tags(clientTags));
    metricsRegistry = new FetcherMetricsRegistry(clientTags.keySet(), "consumer" + groupId);
    fetcher.close();
    fetcher = createFetcher(subscriptions, metrics);

    // Fetch from topic to generate topic metrics
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetcher.fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));

    // Create throttle metrics
    Fetcher.throttleTimeSensor(metrics, metricsRegistry);

    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
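For context, a MetricNameTemplate pairs a metric name, group, description and tag keys, and metricInstance() resolves it into a concrete MetricName using the tag values configured on the Metrics instance. A minimal sketch, assuming the metrics instance above with its client-id tag (the name, group and description strings here are illustrative):

MetricNameTemplate template = new MetricNameTemplate(
        "fetch-size-avg",                  // metric name
        "consumer-fetch-manager-metrics",  // metric group
        "The average number of bytes fetched per request",
        "client-id");                      // tag keys
// Fills in the tag values from the MetricConfig (here client-id=clientA)
MetricName concrete = metrics.metricInstance(template);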
assertNotNull("Sensor test.s1 must be present", metrics.getSensor("test.s1")); assertNotNull("MetricName test.s1.count must be present", metrics.metrics().get(metrics.metricName("test.s1.count", "grp1"))); assertNotNull("Sensor test.s2 must be present", metrics.getSensor("test.s2")); assertNotNull("MetricName test.s2.count must be present", metrics.metrics().get(metrics.metricName("test.s2.count", "grp1"))); assertNull("Sensor test.s1 should have been purged", metrics.getSensor("test.s1")); assertNull("MetricName test.s1.count should have been purged", metrics.metrics().get(metrics.metricName("test.s1.count", "grp1"))); assertNotNull("Sensor test.s2 must be present", metrics.getSensor("test.s2")); assertNotNull("MetricName test.s2.count must be present", metrics.metrics().get(metrics.metricName("test.s2.count", "grp1"))); assertNotNull("Sensor test.s2 must be present", metrics.getSensor("test.s2")); assertNotNull("MetricName test.s2.count must be present", metrics.metrics().get(metrics.metricName("test.s2.count", "grp1"))); assertNull("Sensor test.s2 should have been purged", metrics.getSensor("test.s1")); assertNull("MetricName test.s2.count should have been purged", metrics.metrics().get(metrics.metricName("test.s1.count", "grp1"))); assertNotNull("Sensor test.s1 must be present", metrics.getSensor("test.s1")); assertNotNull("MetricName test.s1.count must be present", metrics.metrics().get(metrics.metricName("test.s1.count", "grp1")));
subscriptions.seek(tp1, 0);

Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
s.add(new Meter(TimeUnit.SECONDS, rateMetricName, totalMetricName));
s.add(new Meter(TimeUnit.SECONDS, new Count(), countRateMetricName, countTotalMetricName));

KafkaMetric totalMetric = metrics.metrics().get(totalMetricName);
KafkaMetric countTotalMetric = metrics.metrics().get(countTotalMetricName);
KafkaMetric rateMetric = metrics.metrics().get(rateMetricName);
KafkaMetric countRateMetric = metrics.metrics().get(countRateMetricName);

assertEquals("Rate(0...2) = 2.666", sum / elapsedSecs, (Double) rateMetric.metricValue(), EPS);
assertEquals("Count rate(0...2) = 0.02666", count / elapsedSecs, (Double) countRateMetric.metricValue(), EPS);
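For reference, a Meter is a compound stat that registers both a rate and a cumulative total under the metric names it is given; a minimal sketch of registering one on a sensor (the sensor, metric and group names are illustrative):

Sensor requests = metrics.sensor("requests");
MetricName requestRate = metrics.metricName("request-rate", "example-group");
MetricName requestTotal = metrics.metricName("request-total", "example-group");
// Registers both metrics; each record() updates the rate sample and the running total
requests.add(new Meter(TimeUnit.SECONDS, requestRate, requestTotal));
requests.record();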
Sensor sensor = metrics.sensor("test", config); sensor.add(percs); Metric p25 = this.metrics.metrics().get(metrics.metricName("test.p25", "grp1")); Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1")); Metric p75 = this.metrics.metrics().get(metrics.metricName("test.p75", "grp1"));
KafkaMetric recordErrors = m.metrics().get(senderMetrics.recordErrorRate);
assertTrue("Expected non-zero value for record send errors", (Double) recordErrors.metricValue() > 0);
@Test
public void testQuotas() {
    Sensor sensor = metrics.sensor("test");
    sensor.add(metrics.metricName("test1.total", "grp1"), new Total(),
            new MetricConfig().quota(Quota.upperBound(5.0)));
    sensor.add(metrics.metricName("test2.total", "grp1"), new Total(),
            new MetricConfig().quota(Quota.lowerBound(0.0)));
    sensor.record(5.0);
    try {
        sensor.record(1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
    assertEquals(6.0, (Double) metrics.metrics().get(metrics.metricName("test1.total", "grp1")).metricValue(), EPS);
    sensor.record(-6.0);
    try {
        sensor.record(-1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
}
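Outside of a test, the same quota mechanism can guard a sensor at record time; a minimal sketch, assuming an application-owned Metrics instance (the sensor name, metric name, group and bound are illustrative):

Metrics appMetrics = new Metrics();
Sensor bytesIn = appMetrics.sensor("bytes-in");
bytesIn.add(appMetrics.metricName("bytes-in.total", "example-group"), new Total(),
        new MetricConfig().quota(Quota.upperBound(1024.0)));
try {
    bytesIn.record(2048.0);   // cumulative total exceeds the 1024.0 bound, so this throws
} catch (QuotaViolationException e) {
    // back off or reject the work that triggered the violation
}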