@Override
public void getMetrics(MetricsCollector collector, boolean b) {
  // Open the scheduler record and attach the identifying tags.
  MetricsRecordBuilder builder = collector.addRecord(SchedulerMetrics)
      .setContext("scheduler")
      .tag(ProcessName, "DAGAppMaster")
      .tag(SessionId, sessionId);
  // The DAG id tag is optional: only present while a DAG is set.
  if (dagId != null) {
    builder.tag(MsInfo.Context, dagId);
  }
  getTaskSchedulerStats(builder);
}
/** Writes the task-scheduler gauges and counters into the given record builder. */
private void getTaskSchedulerStats(MetricsRecordBuilder rb) {
  // Cluster / per-instance sizing gauges.
  rb.addGauge(SchedulerClusterNodeCount, clusterNodeCount.value());
  rb.addGauge(SchedulerExecutorsPerInstance, numExecutors.value());
  rb.addGauge(SchedulerMemoryPerInstance, memoryPerInstance.value());
  rb.addGauge(SchedulerCpuCoresPerInstance, cpuCoresPerInstance.value());
  rb.addGauge(SchedulerDisabledNodeCount, disabledNodeCount.value());
  // Task lifecycle counters.
  rb.addCounter(SchedulerPendingTaskCount, pendingTasksCount.value());
  rb.addCounter(SchedulerSchedulableTaskCount, schedulableTasksCount.value());
  rb.addCounter(SchedulerRunningTaskCount, runningTasksCount.value());
  rb.addCounter(SchedulerSuccessfulTaskCount, successfulTasksCount.value());
  rb.addCounter(SchedulerPendingPreemptionTaskCount, pendingPreemptionTasksCount.value());
  rb.addCounter(SchedulerPreemptedTaskCount, preemptedTasksCount.value());
  rb.addCounter(SchedulerCompletedDagCount, completedDagcount.value());
}
/**
 * Sample all the mutable metrics and put the snapshot in the builder.
 * @param builder to contain the metrics snapshot
 * @param all get all the metrics even if the values are not changed.
 */
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  // Emit every registered tag before any metric values.
  for (MetricsTag metricsTag : tags()) {
    builder.add(metricsTag);
  }
  // Then let each registered metric contribute its current sample.
  for (MutableMetric mutableMetric : metrics()) {
    mutableMetric.snapshot(builder, all);
  }
}
@Test
public void testHybrid() {
  HybridMetrics metrics = new HybridMetrics();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);
  // A hybrid source must be returned as-is, not wrapped in an adapter.
  assertSame(metrics, source);
  metrics.C0.incr();
  MetricsRecordBuilder builder = getMetrics(source);
  MetricsCollector parent = builder.parent();
  // Records come from both the explicit getMetrics() and the annotations.
  verify(parent).addRecord("foo");
  verify(parent).addRecord("bar");
  verify(parent).addRecord(info("HybridMetrics", "HybridMetrics"));
  verify(builder).setContext("foocontext");
  verify(builder).addCounter(info("C1", "C1 desc"), 1);
  verify(builder).setContext("barcontext");
  verify(builder).addGauge(info("G1", "G1 desc"), 1);
  verify(builder).add(tag(MsInfo.Context, "hybrid"));
  verify(builder).addCounter(info("C0", "C0 desc"), 1);
  verify(builder).addGauge(info("G0", "G0"), 0);
}
builder.addGauge(info, (int) o); } else if (o instanceof Long) { builder.addGauge(info, (long) o); } else if (o instanceof Float) { builder.addGauge(info, (float) o); } else if (o instanceof Double) { builder.addGauge(info, (double) o); } else { LOG.trace("Ignoring Gauge ({}) with unhandled type: {}", gauge.getKey(), o.getClass()); Entry<String, Counter> counter = counterIterator.next(); MetricsInfo info = Interns.info(counter.getKey(), EMPTY_STRING); builder.addCounter(info, counter.getValue().getCount()); builder.tag(RATE_UNIT_LABEL, getRateUnit()); builder.tag(DURATION_UNIT_LABEL, getDurationUnit());
@Test
public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder recordBuilder = getMetrics(jvmMetrics);
  MetricsCollector collector = recordBuilder.parent();
  verify(collector).addRecord(JvmMetrics);
  verify(recordBuilder).tag(ProcessName, "test");
  verify(recordBuilder).tag(SessionId, "test");
  // Metric type is keyed off the info-name prefix: Mem/Threads are gauges,
  // Gc/Log are counters.
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    String name = info.name();
    if (name.startsWith("Mem")) {
      verify(recordBuilder).addGauge(eq(info), anyFloat());
    } else if (name.startsWith("Gc")) {
      verify(recordBuilder).addCounter(eq(info), anyLong());
    } else if (name.startsWith("Threads")) {
      verify(recordBuilder).addGauge(eq(info), anyInt());
    } else if (name.startsWith("Log")) {
      verify(recordBuilder).addCounter(eq(info), anyLong());
    }
  }
}
}
/**
 * Publishes heap, non-heap and runtime memory gauges, each divided by M
 * (presumably a bytes-to-MiB scaling constant — defined elsewhere in the file).
 */
private void getMemoryUsage(MetricsRecordBuilder rb) {
  MemoryUsage nonHeap = memoryMXBean.getNonHeapMemoryUsage();
  MemoryUsage heap = memoryMXBean.getHeapMemoryUsage();
  Runtime runtime = Runtime.getRuntime();
  rb.addGauge(MemNonHeapUsedM, nonHeap.getUsed() / M);
  rb.addGauge(MemNonHeapCommittedM, nonHeap.getCommitted() / M);
  rb.addGauge(MemNonHeapMaxM, nonHeap.getMax() / M);
  rb.addGauge(MemHeapUsedM, heap.getUsed() / M);
  rb.addGauge(MemHeapCommittedM, heap.getCommitted() / M);
  rb.addGauge(MemHeapMaxM, heap.getMax() / M);
  rb.addGauge(MemMaxM, runtime.maxMemory() / M);
}
// Snapshot for a method-backed counter: invokes the annotated no-arg getter
// reflectively and publishes its numeric result.
// NOTE(review): a null return from the getter would NPE inside the try and be
// reported as an invocation error below — presumably the getters never return
// null; confirm against the annotation contract.
@Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
  try {
    Object ret = method.invoke(obj, (Object[])null);
    // Counter width follows the declared return type (int vs. long).
    if (isInt(type)) rb.addCounter(info, ((Integer) ret).intValue());
    else rb.addCounter(info, ((Long) ret).longValue());
  } catch (Exception ex) {
    // Log and continue: one failing metric must not abort the snapshot pass.
    LOG.error("Error invoking method "+ method.getName(), ex);
  }
} };
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  // Record "foo" is ended explicitly so a second record can be started
  // on the collector returned by endRecord().
  MetricsCollector sameCollector = collector.addRecord("foo")
      .setContext("foocontext")
      .addCounter(info("C1", "C1 desc"), 1)
      .endRecord();
  sameCollector.addRecord("bar")
      .setContext("barcontext")
      .addGauge(info("G1", "G1 desc"), 1);
  // The registry contributes its own record on top of the two manual ones.
  registry.snapshot(collector.addRecord(registry.info()), all);
}
}
@Override public void getMetrics(MetricsCollector collector, boolean all) { MetricsRecordBuilder builder = collector.addRecord(recordName); if (null != context) { builder.setContext(context); } // Synchronizing here ensures that the dropwizard metrics collection side is excluded from executing // at the same time we are pulling elements from the queues. synchronized (this) { snapshotAllMetrics(builder); } metrics2Registry.snapshot(builder, all); }
.setContext(context) .addGauge(Interns.info("Capacity", "Total storage capacity"), beanClass.getCapacity()) .addGauge(Interns.info("DfsUsed", "Total bytes used by dfs datanode"), beanClass.getDfsUsed()) .addGauge(Interns.info("Remaining", "Total bytes of free storage"), beanClass.getRemaining()) .add(new MetricsTag(Interns.info("StorageInfo", "Storage ID"), beanClass.getStorageInfo())) .addGauge(Interns.info("NumFailedVolumes", "Number of failed Volumes" + " in the data Node"), beanClass.getNumFailedVolumes()) .addGauge(Interns.info("LastVolumeFailureDate", "Last Volume failure in" + " milliseconds from epoch"), beanClass.getLastVolumeFailureDate()) .addGauge(Interns.info("EstimatedCapacityLostTotal", "Total capacity lost" + " due to volume failure"), beanClass.getEstimatedCapacityLostTotal()) .addGauge(Interns.info("CacheUsed", "Datanode cache used in bytes"), beanClass.getCacheUsed()) .addGauge(Interns.info("CacheCapacity", "Datanode cache capacity"), beanClass.getCacheCapacity()) .addGauge(Interns.info("NumBlocksCached", "Datanode number" + " of blocks cached"), beanClass.getNumBlocksCached()) .addGauge(Interns.info("NumBlocksFailedToCache", "Datanode number of " + "blocks failed to cache"), beanClass.getNumBlocksFailedToCache()) .addGauge(Interns.info("NumBlocksFailedToUnCache", "Datanode number of" + " blocks failed in cache eviction"), beanClass.getNumBlocksFailedToUncache());
.addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), masterWrapper.getMergePlanCount()) .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), masterWrapper.getSplitPlanCount()) .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) .addGauge(Interns.info(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC), masterWrapper.getStartTime()) .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, MASTER_FINISHED_INITIALIZATION_TIME_DESC), masterWrapper.getMasterInitializationTime()) .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), masterWrapper.getAverageLoad()) .addGauge(Interns.info(ONLINE_REGION_COUNT_NAME, ONLINE_REGION_COUNT_DESC), regionNumberPair.getFirst()) .addGauge(Interns.info(OFFLINE_REGION_COUNT_NAME, OFFLINE_REGION_COUNT_DESC), regionNumberPair.getSecond()) .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), masterWrapper.getRegionServers()) .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), masterWrapper.getDeadRegionServers()) .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC), masterWrapper.getNumDeadRegionServers()) .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), masterWrapper.getZookeeperQuorum()) .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName())
.addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), rsWrap.getTotalRequestCount()) .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount()) .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), rsWrap.getReadRequestsCount()) .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC), rsWrap.getCpRequestsCount()) .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount()) .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), rsWrap.getWriteRequestsCount()) .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), rsWrap.getRpcGetRequestsCount()) .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), rsWrap.getRpcScanRequestsCount()) .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), rsWrap.getRpcMultiRequestsCount()) .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), rsWrap.getRpcMutateRequestsCount()) .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), rsWrap.getCheckAndMutateChecksFailed()) .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), rsWrap.getCheckAndMutateChecksPassed()) .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), rsWrap.getBlockCacheHitCount()) .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC),
@Override public void getMetrics(MetricsCollector collector, boolean all) { MetricsRecordBuilder builder = collector.addRecord(RECORD).setContext(CONTEXT); // Update each MutableMetric with the new value snapshot(); // Add then all to the builder registry.snapshot(builder, all); // TODO Some day, MetricsRegistry will also support the MetricsGaugeDouble or allow us to // instantiate it directly builder.addGauge(Interns.info(FILES_PER_TABLET, "Number of files per tablet"), util.getAverageFilesPerTablet()); builder.addGauge(Interns.info(HOLD_TIME, "Time commits held"), util.getHoldTime()); builder.addGauge(Interns.info(INGEST_RATE, "Ingest rate (entries/sec)"), util.getIngest()); builder.addGauge(Interns.info(INGEST_BYTE_RATE, "Ingest rate (bytes/sec)"), util.getIngestByteRate()); builder.addGauge(Interns.info(QUERY_RATE, "Query rate (entries/sec)"), util.getQueryRate()); builder.addGauge(Interns.info(QUERY_BYTE_RATE, "Query rate (bytes/sec)"), util.getQueryByteRate()); builder.addGauge(Interns.info(SCANNED_RATE, "Scanned rate"), util.getScannedRate()); } }
/**
 * Flatten out the top window metrics into
 * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
 * external metrics systems. Each metrics record added corresponds to the
 * reporting period a.k.a window length of the configured rolling windows.
 */
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  // Guard clause: nothing to report when the source is disabled.
  if (!isMetricsSourceEnabled) {
    return;
  }
  for (final TopWindow window : getTopWindows()) {
    // One record per rolling-window length, in the "dfs" context.
    final MetricsRecordBuilder recordBuilder =
        collector.addRecord(buildOpRecordName(window)).setContext("dfs");
    for (final Op op : window.getOps()) {
      recordBuilder.addCounter(buildOpTotalCountMetricsInfo(op), op.getTotalCount());
      // Per-user counters for the top users of this op within the window.
      for (final User user : op.getTopUsers()) {
        recordBuilder.addCounter(buildOpRecordMetricsInfo(op, user), user.getCount());
      }
    }
  }
}
// Snapshot for a method-backed tag: invokes the annotated no-arg getter
// reflectively and publishes its String result as a tag value.
// NOTE(review): presumably the getter's declared return type is String — a
// non-String return would throw ClassCastException and be logged below; verify.
@Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
  try {
    Object ret = method.invoke(obj, (Object[]) null);
    rb.tag(info, (String) ret);
  } catch (Exception ex) {
    // Log and continue: one failing tag must not abort the snapshot pass.
    LOG.error("Error invoking method "+ method.getName(), ex);
  }
} };
/**
 * Syntactic sugar to add multiple records in a collector in a one liner.
 * Ends the current record and hands back the owning collector so callers
 * can immediately chain another {@code addRecord(...)} call.
 * @return the parent metrics collector object
 */
public MetricsCollector endRecord() {
  return parent();
}
}
@Test
public void testHybrid() {
  HybridMetrics metrics = new HybridMetrics();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);
  // The annotated object itself serves as the source; no wrapper is created.
  assertSame(metrics, source);
  metrics.C0.incr();
  MetricsRecordBuilder recordBuilder = getMetrics(source);
  MetricsCollector metricsCollector = recordBuilder.parent();
  // Two manual records plus the annotation-driven "HybridMetrics" record.
  verify(metricsCollector).addRecord("foo");
  verify(metricsCollector).addRecord("bar");
  verify(metricsCollector).addRecord(info("HybridMetrics", "HybridMetrics"));
  verify(recordBuilder).setContext("foocontext");
  verify(recordBuilder).addCounter(info("C1", "C1 desc"), 1);
  verify(recordBuilder).setContext("barcontext");
  verify(recordBuilder).addGauge(info("G1", "G1 desc"), 1);
  verify(recordBuilder).add(tag(MsInfo.Context, "hybrid"));
  verify(recordBuilder).addCounter(info("C0", "C0 desc"), 1);
  verify(recordBuilder).addGauge(info("G0", "G0"), 0);
}
@Test
public void testMethods() {
  MyMetrics2 metrics = new MyMetrics2();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);
  MetricsRecordBuilder recordBuilder = getMetrics(source);
  // Each numeric return type maps to the matching typed gauge/counter overload.
  verify(recordBuilder).addGauge(info("G1", "G1"), 1);
  verify(recordBuilder).addGauge(info("G2", "G2"), 2L);
  verify(recordBuilder).addGauge(info("G3", "G3"), 3.0f);
  verify(recordBuilder).addGauge(info("G4", "G4"), 4.0);
  verify(recordBuilder).addCounter(info("C1", "C1"), 1);
  verify(recordBuilder).addCounter(info("C2", "C2"), 2L);
  verify(recordBuilder).tag(info("T1", "T1"), "t1");
}