/**
 * Returns the current value of the given Thrift field, boxed as an Object.
 *
 * @param field the field selector generated by Thrift
 * @return the boxed field value
 * @throws java.lang.IllegalStateException if the selector is not a known field
 */
public java.lang.Object getFieldValue(_Fields field) {
  java.lang.Object value;
  // Single exit point: every known field assigns into 'value'; anything else
  // (unreachable for a well-formed enum) is rejected in 'default'.
  switch (field) {
    case HOST:
      value = get_host();
      break;
    case UPTIME_SECS:
      value = get_uptime_secs();
      break;
    case NUM_WORKERS:
      value = get_num_workers();
      break;
    case NUM_USED_WORKERS:
      value = get_num_used_workers();
      break;
    case SUPERVISOR_ID:
      value = get_supervisor_id();
      break;
    case VERSION:
      value = get_version();
      break;
    case TOTAL_RESOURCES:
      value = get_total_resources();
      break;
    case USED_MEM:
      value = get_used_mem();
      break;
    case USED_CPU:
      value = get_used_cpu();
      break;
    case FRAGMENTED_MEM:
      value = get_fragmented_mem();
      break;
    case FRAGMENTED_CPU:
      value = get_fragmented_cpu();
      break;
    default:
      throw new java.lang.IllegalStateException();
  }
  return value;
}
// Assembles the Thrift SupervisorSummary for one node from its heartbeat info
// plus the scheduler's resource view.
// NOTE(review): this span is truncated mid-block — the closing braces are
// outside the visible chunk.
SupervisorSummary ret = new SupervisorSummary(info.get_hostname(), (int) info.get_uptime_secs(), numPorts, numUsedPorts, supervisorId);
ret.set_total_resources(info.get_resources_map());
// Resource view may be absent if the scheduler has not reported this node yet.
SupervisorResources resources = nodeIdToResources.get().get(supervisorId);
if (resources != null) {
    ret.set_used_mem(resources.getUsedMem());
    ret.set_used_cpu(resources.getUsedCpu());
    if (isFragmented(resources)) {
        final double availableCpu = resources.getAvailableCpu();
        // NOTE(review): this warning fires unconditionally, yet its message claims
        // "Negative fragmented CPU" — it presumably should be guarded by
        // (availableCpu < 0), mirroring the memory branch below. Confirm and fix.
        LOG.warn("Negative fragmented CPU on {}", supervisorId);
        ret.set_fragmented_cpu(availableCpu);
        final double availableMem = resources.getAvailableMem();
        if (availableMem < 0) {
            LOG.warn("Negative fragmented Mem on {}", supervisorId);
            // NOTE(review): set_fragmented_mem (and set_version) only execute when
            // availableMem < 0, so a non-negative fragmented-mem value is never
            // recorded — the setters likely belong outside this if, parallel to
            // set_fragmented_cpu above. Verify against the truncated tail.
            ret.set_fragmented_mem(availableMem);
            ret.set_version(info.get_version());
/**
 * Builds the per-supervisor metric data points from a cluster summary.
 *
 * <p>Each supervisor is keyed by an {@code IClusterMetricsConsumer.SupervisorInfo}
 * (host, id, observation time) mapped to its slot and resource data points.
 *
 * @param summ the cluster summary reported by Nimbus
 * @return map from supervisor identity to its metric data points
 */
private static Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> extractSupervisorMetrics(ClusterSummary summ) {
    Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> supervisorMetrics = new HashMap<>();
    for (SupervisorSummary supervisor : summ.get_supervisors()) {
        // Identity + timestamp of this observation.
        IClusterMetricsConsumer.SupervisorInfo supervisorInfo =
            new IClusterMetricsConsumer.SupervisorInfo(
                supervisor.get_host(), supervisor.get_supervisor_id(), Time.currentTimeSecs());
        List<DataPoint> points = new ArrayList<>();
        points.add(new DataPoint("slotsTotal", supervisor.get_num_workers()));
        points.add(new DataPoint("slotsUsed", supervisor.get_num_used_workers()));
        // Capacity values come straight from the supervisor's resource map.
        points.add(new DataPoint("totalMem", supervisor.get_total_resources().get(Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME)));
        points.add(new DataPoint("totalCpu", supervisor.get_total_resources().get(Constants.COMMON_CPU_RESOURCE_NAME)));
        points.add(new DataPoint("usedMem", supervisor.get_used_mem()));
        points.add(new DataPoint("usedCpu", supervisor.get_used_cpu()));
        supervisorMetrics.put(supervisorInfo, points);
    }
    return supervisorMetrics;
}
/**
 * Folds one fresh ClusterSummary into the long-lived histograms tracking
 * nimbus, supervisor, and topology statistics.
 *
 * <p>Floating-point resource values are rounded to the nearest long before
 * being recorded, matching the histograms' integral domain.
 *
 * @param newSummary the latest cluster summary to record
 */
private void updateHistogram(ClusterSummary newSummary) {
    for (NimbusSummary nimbus : newSummary.get_nimbuses()) {
        nimbusUptime.update(nimbus.get_uptime_secs());
    }
    for (SupervisorSummary supervisor : newSummary.get_supervisors()) {
        supervisorsUptime.update(supervisor.get_uptime_secs());
        supervisorsNumWorkers.update(supervisor.get_num_workers());
        supervisorsNumUsedWorkers.update(supervisor.get_num_used_workers());
        supervisorsUsedMem.update(Math.round(supervisor.get_used_mem()));
        supervisorsUsedCpu.update(Math.round(supervisor.get_used_cpu()));
        supervisorsFragmentedMem.update(Math.round(supervisor.get_fragmented_mem()));
        supervisorsFragmentedCpu.update(Math.round(supervisor.get_fragmented_cpu()));
    }
    for (TopologySummary topology : newSummary.get_topologies()) {
        topologiesNumTasks.update(topology.get_num_tasks());
        topologiesNumExecutors.update(topology.get_num_executors());
        topologiesNumWorker.update(topology.get_num_workers());
        topologiesUptime.update(topology.get_uptime_secs());
        topologiesReplicationCount.update(topology.get_replication_count());
        topologiesRequestedMemOnHeap.update(Math.round(topology.get_requested_memonheap()));
        topologiesRequestedMemOffHeap.update(Math.round(topology.get_requested_memoffheap()));
        topologiesRequestedCpu.update(Math.round(topology.get_requested_cpu()));
        topologiesAssignedMemOnHeap.update(Math.round(topology.get_assigned_memonheap()));
        topologiesAssignedMemOffHeap.update(Math.round(topology.get_assigned_memoffheap()));
        topologiesAssignedCpu.update(Math.round(topology.get_assigned_cpu()));
    }
}
/**
 * Builds the cluster-wide metric data points from a cluster summary.
 *
 * <p>Emits, in order: supervisor count, topology count, total/used/free slots,
 * and total executors and tasks across all topologies.
 *
 * @param summ the cluster summary reported by Nimbus
 * @return ordered list of cluster-level data points
 */
private static List<DataPoint> extractClusterMetrics(ClusterSummary summ) {
    List<DataPoint> points = new ArrayList<>();
    points.add(new DataPoint("supervisors", summ.get_supervisors_size()));
    points.add(new DataPoint("topologies", summ.get_topologies_size()));
    // Slot totals are accumulated across every supervisor.
    int slotsTotal = 0;
    int slotsInUse = 0;
    for (SupervisorSummary supervisor : summ.get_supervisors()) {
        slotsTotal += supervisor.get_num_workers();
        slotsInUse += supervisor.get_num_used_workers();
    }
    points.add(new DataPoint("slotsTotal", slotsTotal));
    points.add(new DataPoint("slotsUsed", slotsInUse));
    points.add(new DataPoint("slotsFree", slotsTotal - slotsInUse));
    // Executor/task totals are accumulated across every topology.
    int executorTotal = 0;
    int taskTotal = 0;
    for (TopologySummary topology : summ.get_topologies()) {
        executorTotal += topology.get_num_executors();
        taskTotal += topology.get_num_tasks();
    }
    points.add(new DataPoint("executorsTotal", executorTotal));
    points.add(new DataPoint("tasksTotal", taskTotal));
    return points;
}
// Generated Thrift deserialization fragment (truncated): reads each
// SupervisorSummary element from the input protocol and appends it to
// struct.supervisor_summaries. The loop's braces live outside this chunk,
// so the statements below the 'for' are inside the loop in the full file.
for (int _i478 = 0; _i478 < _list476.size; ++_i478)
    _elem477 = new SupervisorSummary();
    _elem477.read(iprot);
    struct.supervisor_summaries.add(_elem477);
/**
 * Creates a field-by-field duplicate of this summary via the copy constructor.
 *
 * @return a new SupervisorSummary with the same state as this one
 */
public SupervisorSummary deepCopy() {
  SupervisorSummary copy = new SupervisorSummary(this);
  return copy;
}
// Cluster-scaling fragment (truncated): the enclosing if and the branch tail
// live outside this chunk. totalNumWorkers/totalUsed are defined upstream.
master.addSupervisors(1);
} else {
    // Workers hosted by the first supervisor — presumably used as the
    // per-supervisor slot count for a homogeneous cluster; TODO confirm.
    int oneSupervisorWorkersNum = stormCluster.get_supervisors().get(0).get_num_workers();
    // Worker slots currently unassigned across the cluster.
    int numOfVacant = (totalNumWorkers - totalUsed);
    if (numOfVacant > oneSupervisorWorkersNum) {
// Aggregation fragments (truncated): each pipeline maps supervisor summaries
// to a capacity value, preferring the normalized resource-name key and falling
// back to the legacy config-keyed entry when it is absent.
// NOTE(review): both expressions continue beyond this chunk — the closing
// parentheses and the terminal operation are not visible here.
supervisorSummaries.stream().mapToDouble(x -> x.get_total_resources().getOrDefault(
    Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME,
    x.get_total_resources().get(Config.SUPERVISOR_MEMORY_CAPACITY_MB)
supervisorSummaries.stream().mapToDouble(x -> x.get_total_resources().getOrDefault(
    Constants.COMMON_CPU_RESOURCE_NAME,
    x.get_total_resources().get(Config.SUPERVISOR_CPU_CAPACITY)
/**
 * Computes the total number of unoccupied worker slots across the given
 * supervisors.
 *
 * @param summarys supervisor summaries to inspect
 * @return sum over all supervisors of (total workers - used workers)
 */
private int getFreeSlotNum(List<SupervisorSummary> summarys) {
    return summarys.stream()
        .mapToInt(s -> s.get_num_workers() - s.get_num_used_workers())
        .sum();
}
// Generated Thrift deserialization fragment (truncated): reads each
// SupervisorSummary element from the input protocol and appends it to
// struct.supervisors. The loop's braces live outside this chunk, so the
// statements below the 'for' are inside the loop in the full file.
for (int _i153 = 0; _i153 < _list151.size; ++_i153)
    _elem152 = new SupervisorSummary();
    _elem152.read(iprot);
    struct.supervisors.add(_elem152);
/**
 * Performs a deep copy on <i>other</i>.
 *
 * <p>Each nested SupervisorSummary and WorkerSummary is itself copied via its
 * copy constructor, so mutating the copy never affects the original. Unset
 * lists remain unset.
 */
public SupervisorPageInfo(SupervisorPageInfo other) {
  if (other.is_set_supervisor_summaries()) {
    java.util.List<SupervisorSummary> copiedSupervisors =
        new java.util.ArrayList<SupervisorSummary>(other.supervisor_summaries.size());
    for (SupervisorSummary source : other.supervisor_summaries) {
      copiedSupervisors.add(new SupervisorSummary(source));
    }
    this.supervisor_summaries = copiedSupervisors;
  }
  if (other.is_set_worker_summaries()) {
    java.util.List<WorkerSummary> copiedWorkers =
        new java.util.ArrayList<WorkerSummary>(other.worker_summaries.size());
    for (WorkerSummary source : other.worker_summaries) {
      copiedWorkers.add(new WorkerSummary(source));
    }
    this.worker_summaries = copiedWorkers;
  }
}
// Builds the JSON-style map describing one supervisor row for the Storm UI.
// NOTE(review): this chunk is truncated — the getOrDefault default value for
// totalMemory and the entire computation of totalCpu are not visible here.
Map<String, Object> result = new HashMap(); // NOTE(review): raw type — should be new HashMap<>()
result.put("id", supervisorSummary.get_supervisor_id());
result.put("host", supervisorSummary.get_host());
result.put("uptime", UIHelpers.prettyUptimeSec(supervisorSummary.get_uptime_secs()));
result.put("uptimeSeconds", supervisorSummary.get_uptime_secs());
result.put("slotsTotal", supervisorSummary.get_num_workers());
result.put("slotsUsed", supervisorSummary.get_num_used_workers());
// Clamped at zero so over-assignment never shows negative free slots.
result.put("slotsFree", Integer.max(supervisorSummary.get_num_workers() - supervisorSummary.get_num_used_workers(), 0));
Map<String, Double> totalResources = supervisorSummary.get_total_resources();
Double totalMemory = totalResources.getOrDefault(
    Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME,
result.put("totalCpu", totalCpu);
result.put("usedMem", supervisorSummary.get_used_mem());
result.put("usedCpu", supervisorSummary.get_used_cpu());
result.put(
    "logLink",
    getSupervisorLogLink(supervisorSummary.get_host(), config)
);
// Available = capacity minus reported usage (may go negative on over-assignment).
result.put("availMem", totalMemory - supervisorSummary.get_used_mem());
result.put("availCpu", totalCpu - supervisorSummary.get_used_cpu());
result.put("version", supervisorSummary.get_version());
return result;
// Slot-tally fragment (truncated): totalSlots is declared outside this chunk
// and the loop's closing brace is not visible here.
int totalUsedSlots = 0;
for (SupervisorSummary sup : summary.get_supervisors()) {
    totalSlots += sup.get_num_workers();
    totalUsedSlots += sup.get_num_used_workers();
// Generated Thrift deserialization fragment (truncated): reads each
// SupervisorSummary element from the input protocol and appends it to
// struct.supervisor_summaries. The loop's braces live outside this chunk,
// so the statements below the 'for' are inside the loop in the full file.
for (int _i468 = 0; _i468 < _list466.size; ++_i468)
    _elem467 = new SupervisorSummary();
    _elem467.read(iprot);
    struct.supervisor_summaries.add(_elem467);
/**
 * Performs a deep copy on <i>other</i>.
 *
 * <p>The supervisor, topology, and nimbus lists are rebuilt element by element
 * through each element type's copy constructor, so the copy shares no mutable
 * state with the original. Unset lists remain unset.
 */
public ClusterSummary(ClusterSummary other) {
  if (other.is_set_supervisors()) {
    java.util.List<SupervisorSummary> copiedSupervisors =
        new java.util.ArrayList<SupervisorSummary>(other.supervisors.size());
    for (SupervisorSummary source : other.supervisors) {
      copiedSupervisors.add(new SupervisorSummary(source));
    }
    this.supervisors = copiedSupervisors;
  }
  if (other.is_set_topologies()) {
    java.util.List<TopologySummary> copiedTopologies =
        new java.util.ArrayList<TopologySummary>(other.topologies.size());
    for (TopologySummary source : other.topologies) {
      copiedTopologies.add(new TopologySummary(source));
    }
    this.topologies = copiedTopologies;
  }
  if (other.is_set_nimbuses()) {
    java.util.List<NimbusSummary> copiedNimbuses =
        new java.util.ArrayList<NimbusSummary>(other.nimbuses.size());
    for (NimbusSummary source : other.nimbuses) {
      copiedNimbuses.add(new NimbusSummary(source));
    }
    this.nimbuses = copiedNimbuses;
  }
}
/**
 * Returns the current value of the given Thrift field, boxed as an Object.
 *
 * @param field the field selector generated by Thrift
 * @return the boxed field value
 * @throws IllegalStateException if the selector is not a known field
 */
public Object getFieldValue(_Fields field) {
  // Each known field returns directly; an unknown selector (unreachable for a
  // well-formed enum) lands in 'default'.
  switch (field) {
    case HOST:
      return get_host();
    case UPTIME_SECS:
      return get_uptime_secs();
    case NUM_WORKERS:
      return get_num_workers();
    case NUM_USED_WORKERS:
      return get_num_used_workers();
    case SUPERVISOR_ID:
      return get_supervisor_id();
    case VERSION:
      return get_version();
    case TOTAL_RESOURCES:
      return get_total_resources();
    case USED_MEM:
      return get_used_mem();
    case USED_CPU:
      return get_used_cpu();
    default:
      throw new IllegalStateException();
  }
}
// Generated Thrift deserialization fragment (truncated): reads each
// SupervisorSummary element from the input protocol and appends it to
// struct.supervisors. The loop's braces live outside this chunk, so the
// statements below the 'for' are inside the loop in the full file.
for (int _i138 = 0; _i138 < _list136.size; ++_i138)
    _elem137 = new SupervisorSummary();
    _elem137.read(iprot);
    struct.supervisors.add(_elem137);
/**
 * @return a fresh SupervisorSummary duplicating this instance's state,
 *     produced by the copy constructor
 */
public SupervisorSummary deepCopy() {
  return new SupervisorSummary(this);
}
// Generated Thrift deserialization fragment (truncated): reads each
// SupervisorSummary element from the input protocol and appends it to
// struct.supervisor_summaries. The loop's braces live outside this chunk,
// so the statements below the 'for' are inside the loop in the full file.
for (int _i450 = 0; _i450 < _list448.size; ++_i450)
    _elem449 = new SupervisorSummary();
    _elem449.read(iprot);
    struct.supervisor_summaries.add(_elem449);