/**
 * Folds one cluster-state snapshot into the rolling metric histograms:
 * nimbus uptimes, per-supervisor worker/resource usage, and per-topology
 * task/executor/resource figures.
 *
 * @param newSummary the freshly fetched cluster summary to sample from
 */
private void updateHistogram(ClusterSummary newSummary) {
    for (NimbusSummary nimbus : newSummary.get_nimbuses()) {
        nimbusUptime.update(nimbus.get_uptime_secs());
    }
    for (SupervisorSummary sup : newSummary.get_supervisors()) {
        supervisorsUptime.update(sup.get_uptime_secs());
        supervisorsNumWorkers.update(sup.get_num_workers());
        supervisorsNumUsedWorkers.update(sup.get_num_used_workers());
        // Resource figures are fractional; histograms record longs, so round.
        supervisorsUsedMem.update(Math.round(sup.get_used_mem()));
        supervisorsUsedCpu.update(Math.round(sup.get_used_cpu()));
        supervisorsFragmentedMem.update(Math.round(sup.get_fragmented_mem()));
        supervisorsFragmentedCpu.update(Math.round(sup.get_fragmented_cpu()));
    }
    for (TopologySummary topo : newSummary.get_topologies()) {
        topologiesNumTasks.update(topo.get_num_tasks());
        topologiesNumExecutors.update(topo.get_num_executors());
        topologiesNumWorker.update(topo.get_num_workers());
        topologiesUptime.update(topo.get_uptime_secs());
        topologiesReplicationCount.update(topo.get_replication_count());
        // Requested vs. assigned resources, rounded for the long-valued histograms.
        topologiesRequestedMemOnHeap.update(Math.round(topo.get_requested_memonheap()));
        topologiesRequestedMemOffHeap.update(Math.round(topo.get_requested_memoffheap()));
        topologiesRequestedCpu.update(Math.round(topo.get_requested_cpu()));
        topologiesAssignedMemOnHeap.update(Math.round(topo.get_assigned_memonheap()));
        topologiesAssignedMemOffHeap.update(Math.round(topo.get_assigned_memoffheap()));
        topologiesAssignedCpu.update(Math.round(topo.get_assigned_cpu()));
    }
}
/**
 * Builds the per-supervisor data points reported to cluster metrics consumers.
 * For each supervisor in the summary, emits slot counts plus total/used
 * memory and CPU, keyed by a SupervisorInfo stamped with the current time.
 *
 * <p>NOTE(review): values read from {@code get_total_resources()} are used
 * as-is; if a resource key is absent the DataPoint value would be null —
 * presumably the map is always fully populated upstream; verify.
 *
 * @param summ cluster summary to extract supervisor metrics from
 * @return map of supervisor identity to its metric data points
 */
private static Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> extractSupervisorMetrics(ClusterSummary summ) {
    Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> perSupervisor = new HashMap<>();
    for (SupervisorSummary sup : summ.get_supervisors()) {
        IClusterMetricsConsumer.SupervisorInfo identity =
            new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
        List<DataPoint> points = new ArrayList<>();
        points.add(new DataPoint("slotsTotal", sup.get_num_workers()));
        points.add(new DataPoint("slotsUsed", sup.get_num_used_workers()));
        points.add(new DataPoint("totalMem", sup.get_total_resources().get(Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME)));
        points.add(new DataPoint("totalCpu", sup.get_total_resources().get(Constants.COMMON_CPU_RESOURCE_NAME)));
        points.add(new DataPoint("usedMem", sup.get_used_mem()));
        points.add(new DataPoint("usedCpu", sup.get_used_cpu()));
        perSupervisor.put(identity, points);
    }
    return perSupervisor;
}
totalCpu); result.put("usedMem", supervisorSummary.get_used_mem()); result.put("usedCpu", supervisorSummary.get_used_cpu()); result.put( "logLink", ); result.put("availMem", totalMemory - supervisorSummary.get_used_mem()); result.put("availCpu", totalCpu - supervisorSummary.get_used_cpu()); result.put("version", supervisorSummary.get_version()); return result;
/**
 * Generic field accessor: returns the value of the requested Thrift field.
 * The fully-qualified {@code java.lang.*} names indicate this is
 * Thrift-generated code — do not hand-edit logic here; regenerate instead.
 *
 * @param field which field of this struct to read
 * @return the field's current value (primitives are boxed by the getters)
 * @throws java.lang.IllegalStateException if {@code field} matches no case
 *         (should be unreachable for a valid {@code _Fields} constant)
 */
public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
        case HOST:
            return get_host();
        case UPTIME_SECS:
            return get_uptime_secs();
        case NUM_WORKERS:
            return get_num_workers();
        case NUM_USED_WORKERS:
            return get_num_used_workers();
        case SUPERVISOR_ID:
            return get_supervisor_id();
        case VERSION:
            return get_version();
        case TOTAL_RESOURCES:
            return get_total_resources();
        case USED_MEM:
            return get_used_mem();
        case USED_CPU:
            return get_used_cpu();
        case FRAGMENTED_MEM:
            return get_fragmented_mem();
        case FRAGMENTED_CPU:
            return get_fragmented_cpu();
    }
    throw new java.lang.IllegalStateException();
}
/**
 * Generic field accessor: returns the value of the requested field.
 * NOTE(review): this variant lacks the FRAGMENTED_MEM/FRAGMENTED_CPU cases
 * present in the similar accessor above — presumably it belongs to a
 * different struct without those fields; confirm against its _Fields enum.
 *
 * @param field which field of this struct to read
 * @return the field's current value (primitives are boxed by the getters)
 * @throws IllegalStateException if {@code field} matches no case
 *         (should be unreachable for a valid {@code _Fields} constant)
 */
public Object getFieldValue(_Fields field) {
    switch (field) {
        case HOST:
            return get_host();
        case UPTIME_SECS:
            return get_uptime_secs();
        case NUM_WORKERS:
            return get_num_workers();
        case NUM_USED_WORKERS:
            return get_num_used_workers();
        case SUPERVISOR_ID:
            return get_supervisor_id();
        case VERSION:
            return get_version();
        case TOTAL_RESOURCES:
            return get_total_resources();
        case USED_MEM:
            return get_used_mem();
        case USED_CPU:
            return get_used_cpu();
    }
    throw new IllegalStateException();
}