public double value() { return (Double) metric.metricValue(); }
@Override
public Object getAttribute(String name) throws AttributeNotFoundException, MBeanException, ReflectionException {
    if (this.metrics.containsKey(name))
        return this.metrics.get(name).metricValue();
    else
        throw new AttributeNotFoundException("Could not find attribute " + name);
}
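For context, a minimal sketch of reading the same attribute over JMX, which ends up in the getAttribute(...) override above and therefore in metricValue(); the ObjectName pattern and attribute name here are assumptions for illustration, not taken from the snippets in this section.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxMetricReader {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Hypothetical ObjectName; the actual name depends on the JmxReporter prefix and client id.
        ObjectName name = new ObjectName("kafka.producer:type=producer-metrics,client-id=my-client");
        // Resolves through the MBean's getAttribute(...), which calls metricValue() on the KafkaMetric.
        Object value = server.getAttribute(name, "record-send-rate");
        System.out.println("record-send-rate = " + value);
    }
}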
public double metricValue(String name) {
    for (Map.Entry<MetricName, KafkaMetric> entry : metrics.metrics().entrySet()) {
        if (entry.getKey().name().equals(name))
            return (double) entry.getValue().metricValue();
    }
    throw new IllegalStateException("Metric not found, " + name + ", found=" + metrics.metrics().keySet());
}
@Test public void testSimpleStats() throws Exception { verifyStats(m -> (double) m.metricValue()); }
@Test
public void shouldGetMetricValueCorrectly() {
    // Given:
    final TopicSensors.SensorMetric sensorMetric = new SensorMetric(sensor, metric, time, false);

    // When:
    when(metric.metricValue()).thenReturn(1.2345);

    // Then:
    assertThat(sensorMetric.value(), equalTo(1.2345));
    verify(metric).metricValue();
}
@Test
public void testOutboundConnectionsCountInConnectionCreationMetric() throws Exception {
    // create connections
    int expectedConnections = 5;
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port);
    for (int i = 0; i < expectedConnections; i++)
        connect(Integer.toString(i), addr);

    // Poll continuously, as we cannot guarantee that the first call will see all connections
    int seenConnections = 0;
    for (int i = 0; i < 10; i++) {
        selector.poll(100L);
        seenConnections += selector.connected().size();
        if (seenConnections == expectedConnections)
            break;
    }

    assertEquals((double) expectedConnections, getMetric("connection-creation-total").metricValue());
    assertEquals((double) expectedConnections, getMetric("connection-count").metricValue());
}
@Test
public void testInboundConnectionsCountInConnectionCreationMetric() throws Exception {
    int conns = 5;
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        for (int i = 0; i < conns; i++) {
            Thread sender = createSender(serverAddress, randomPayload(1));
            sender.start();
            SocketChannel channel = ss.accept();
            channel.configureBlocking(false);
            selector.register(Integer.toString(i), channel);
        }
    }
    assertEquals((double) conns, getMetric("connection-creation-total").metricValue());
    assertEquals((double) conns, getMetric("connection-count").metricValue());
}
@Test
public void testFetcherMetrics() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);

    // recordsFetchLagMax should be initialized to NaN
    assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    // recordsFetchLagMax should be hw - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 0);
    assertEquals(100, (Double) recordsFetchLagMax.metricValue(), EPSILON);

    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(100, (Double) partitionLag.metricValue(), EPSILON);

    // recordsFetchLagMax should be hw - offset of the last message after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 0);
    assertEquals(197, (Double) recordsFetchLagMax.metricValue(), EPSILON);
    assertEquals(197, (Double) partitionLag.metricValue(), EPSILON);

    // verify de-registration of partition lag
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
}
/**
 * Verifies that concurrent sensor add, remove, updates and read don't result
 * in errors or deadlock.
 */
@Test
public void testConcurrentReadUpdate() throws Exception {
    final Random random = new Random();
    final Deque<Sensor> sensors = new ConcurrentLinkedDeque<>();
    metrics = new Metrics(new MockTime(10));
    SensorCreator sensorCreator = new SensorCreator(metrics);

    final AtomicBoolean alive = new AtomicBoolean(true);
    executorService = Executors.newSingleThreadExecutor();
    executorService.submit(new ConcurrentMetricOperation(alive, "record",
        () -> sensors.forEach(sensor -> sensor.record(random.nextInt(10000)))));

    for (int i = 0; i < 10000; i++) {
        if (sensors.size() > 5) {
            Sensor sensor = random.nextBoolean() ? sensors.removeFirst() : sensors.removeLast();
            metrics.removeSensor(sensor.name());
        }
        StatType statType = StatType.forId(random.nextInt(StatType.values().length));
        sensors.add(sensorCreator.createSensor(statType, i));
        for (Sensor sensor : sensors) {
            for (KafkaMetric metric : sensor.metrics()) {
                assertNotNull("Invalid metric value", metric.metricValue());
            }
        }
    }
    alive.set(false);
}
assertEquals(Double.NaN, (Double) recordsFetchLeadMin.metricValue(), EPSILON);
assertEquals(0L, (Double) recordsFetchLeadMin.metricValue(), EPSILON);
assertEquals(0L, (Double) partitionLead.metricValue(), EPSILON);
assertEquals(0L, (Double) recordsFetchLeadMin.metricValue(), EPSILON);
assertEquals(3L, (Double) partitionLead.metricValue(), EPSILON);
assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);
assertEquals(50, (Double) recordsFetchLagMax.metricValue(), EPSILON);
assertEquals(50, (Double) partitionLag.metricValue(), EPSILON);
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 150L, 0);
assertEquals(147, (Double) recordsFetchLagMax.metricValue(), EPSILON);
assertEquals(147, (Double) partitionLag.metricValue(), EPSILON);
@Test
public void testFetchResponseMetricsPartialResponse() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1)
            expectedBytes += record.sizeInBytes();
    }

    fetchRecords(tp0, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(2, (Double) recordsCountAverage.metricValue(), EPSILON);
}
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(time.timer(0));
    fetcher.fetchedRecords();

    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();

    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
sum += 100;
time.sleep(cfg.timeWindowMs());
assertEquals(sum, (Double) totalMetric.metricValue(), EPS);
assertEquals("Rate(0...2) = 2.666", sum / elapsedSecs, (Double) rateMetric.metricValue(), EPS);
assertEquals("Count rate(0...2) = 0.02666", count / elapsedSecs, (Double) countRateMetric.metricValue(), EPS);
assertEquals("Elapsed Time = 75 seconds", elapsedSecs,
        ((Rate) rateMetric.measurable()).windowSize(cfg, time.milliseconds()) / 1000, EPS);
assertEquals(sum, (Double) totalMetric.metricValue(), EPS);
assertEquals(count, (Double) countTotalMetric.metricValue(), EPS);
assertEquals(0, (Double) rateMetric.metricValue(), EPS);
assertEquals(0, (Double) countRateMetric.metricValue(), EPS);
assertEquals(sum, (Double) totalMetric.metricValue(), EPS);
assertEquals(count, (Double) countTotalMetric.metricValue(), EPS);
expectedBytes += record.sizeInBytes();
assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
grandchild.record();
double p1 = (double) parent1.metrics().get(0).metricValue();
double p2 = (double) parent2.metrics().get(0).metricValue();
double c1 = (double) child1.metrics().get(0).metricValue();
double c2 = (double) child2.metrics().get(0).metricValue();
double gc = (double) grandchild.metrics().get(0).metricValue();
assertTrue("Expected non-zero value for record send errors", (Double) recordErrors.metricValue() > 0);
@Test
public void testQuotas() {
    Sensor sensor = metrics.sensor("test");
    sensor.add(metrics.metricName("test1.total", "grp1"), new Total(), new MetricConfig().quota(Quota.upperBound(5.0)));
    sensor.add(metrics.metricName("test2.total", "grp1"), new Total(), new MetricConfig().quota(Quota.lowerBound(0.0)));
    sensor.record(5.0);
    try {
        sensor.record(1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
    assertEquals(6.0, (Double) metrics.metrics().get(metrics.metricName("test1.total", "grp1")).metricValue(), EPS);
    sensor.record(-6.0);
    try {
        sensor.record(-1.0);
        fail("Should have gotten a quota violation.");
    } catch (QuotaViolationException e) {
        // this is good
    }
}
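The quota test above exercises the same Sensor/Total/metricValue() flow most of these snippets rely on. A minimal standalone sketch of that flow follows; the class, sensor, and metric names are illustrative assumptions, not taken from the tests above.

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Total;

public class MetricValueSketch {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            // Register a sensor with a single cumulative-sum stat.
            Sensor sensor = metrics.sensor("example");
            MetricName name = metrics.metricName("example.total", "example-group");
            sensor.add(name, new Total());

            sensor.record(2.0);
            sensor.record(3.0);

            // metricValue() returns Object; measurable stats report a Double.
            KafkaMetric metric = metrics.metric(name);
            System.out.println("example.total = " + (Double) metric.metricValue()); // 5.0
        }
    }
}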
KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
assertEquals(250, (Double) avgMetric.metricValue(), EPS);
assertEquals(400, (Double) maxMetric.metricValue(), EPS);
client.close();