/**
 * Filters out ExecutorSummary instances whose stats field is null.
 *
 * @param summs a list of ExecutorSummary
 * @return a new list containing only the summaries that carry non-null stats
 */
public static List<ExecutorSummary> getFilledStats(List<ExecutorSummary> summs) {
    // Keep only executors that have actually reported stats.
    return summs.stream()
            .filter(summary -> summary.get_stats() != null)
            .collect(Collectors.toList());
}
/**
 * Aggregates the stats shared by spouts and bolts (emitted / transferred counts);
 * called from aggregateSpoutStats / aggregateBoltStats.
 *
 * @param statsSeq executor summaries to aggregate; each is expected to carry non-null stats
 * @return {metric -> win -> stream id -> value}
 */
public static <T> Map<String, Map<String, Map<T, Long>>> aggregateCommonStats(List<ExecutorSummary> statsSeq) {
    Map<String, Map<String, Map<T, Long>>> aggregated = new HashMap<>();

    List<Map<String, Map<String, Long>>> emittedSeq = new ArrayList<>();
    List<Map<String, Map<String, Long>>> transferredSeq = new ArrayList<>();
    for (ExecutorSummary summary : statsSeq) {
        ExecutorStats stats = summary.get_stats();
        emittedSeq.add(stats.get_emitted());
        transferredSeq.add(stats.get_transferred());
    }
    // Raw cast: aggregateCounts' value type does not line up with Map<T, Long>,
    // so the result is inserted through a raw Map view.
    ((Map) aggregated).put(EMITTED, aggregateCounts(emittedSeq));
    ((Map) aggregated).put(TRANSFERRED, aggregateCounts(transferredSeq));
    return aggregated;
}
/**
 * Collects, across all bolt executors, the per-stream values selected by {@code func}
 * for the given global stream id.
 *
 * @param summaries executor summaries to scan; may be null
 * @param id        the stream whose values are extracted
 * @param func      maps a BoltStats to {win -> stream id -> value}
 * @return every non-null value found for {@code id}, in encounter order
 */
private static List<Double> extractBoltValues(List<ExecutorSummary> summaries,
                                              GlobalStreamId id,
                                              Function<BoltStats, Map<String, Map<GlobalStreamId, Double>>> func) {
    List<Double> values = new ArrayList<>();
    if (summaries == null) {
        return values;
    }
    for (ExecutorSummary summary : summaries) {
        if (summary == null || !summary.is_set_stats()) {
            continue;
        }
        Map<String, Map<GlobalStreamId, Double>> data = func.apply(summary.get_stats().get_specific().get_bolt());
        if (data == null) {
            continue;
        }
        // Pull the value for this stream out of every window's sub-map, skipping misses.
        for (Map<GlobalStreamId, Double> windowValues : data.values()) {
            Double value = windowValues.get(id);
            if (value != null) {
                values.add(value);
            }
        }
    }
    return values;
}
/**
 * Builds a display stat map (host, port, uptime, transferred) from an executor summary.
 *
 * @param executorSummary the executor summary to read from
 * @return a map with ":host", ":port", ":uptime_secs" and ":transferred" entries;
 *         ":transferred" is null when the summary carries no stats
 */
public static Map<String, Object> getStatMapFromExecutorSummary(ExecutorSummary executorSummary) {
    // Use the diamond operator instead of a raw HashMap to keep the map type-safe.
    Map<String, Object> result = new HashMap<>();
    result.put(":host", executorSummary.get_host());
    result.put(":port", executorSummary.get_port());
    result.put(":uptime_secs", executorSummary.get_uptime_secs());
    result.put(":transferred", null);
    if (executorSummary.is_set_stats()) {
        result.put(":transferred", sanitizeTransferredStats(executorSummary.get_stats().get_transferred()));
    }
    return result;
}
/**
 * Thrift-generated style accessor: returns the value identified by {@code field}.
 *
 * @param field which field to fetch
 * @return the current value of that field (boxed for primitives)
 * @throws java.lang.IllegalStateException if the field id is not recognized
 */
public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
        case EXECUTOR_INFO:
            return get_executor_info();
        case COMPONENT_ID:
            return get_component_id();
        case HOST:
            return get_host();
        case PORT:
            return get_port();
        case UPTIME_SECS:
            return get_uptime_secs();
        case STATS:
            return get_stats();
    }
    // Unreachable for valid enum constants; defends against an unknown field id.
    throw new java.lang.IllegalStateException();
}
/** * aggregate spout stats. * * @param statsSeq a seq of ExecutorStats * @param includeSys whether to include system streams * @return aggregated spout stats: {metric -> win -> global stream id -> value} */ public static Map<String, Map> aggregateSpoutStats(List<ExecutorSummary> statsSeq, boolean includeSys) { // actually Map<String, Map<String, Map<String, Long/Double>>> Map<String, Map> ret = new HashMap<>(); Map<String, Map<String, Map<String, Long>>> commonStats = aggregateCommonStats(statsSeq); // filter sys streams if necessary commonStats = preProcessStreamSummary(commonStats, includeSys); List<Map<String, Map<String, Long>>> acked = new ArrayList<>(); List<Map<String, Map<String, Long>>> failed = new ArrayList<>(); List<Map<String, Map<String, Double>>> completeLatencies = new ArrayList<>(); for (ExecutorSummary summary : statsSeq) { ExecutorStats stats = summary.get_stats(); acked.add(stats.get_specific().get_spout().get_acked()); failed.add(stats.get_specific().get_spout().get_failed()); completeLatencies.add(stats.get_specific().get_spout().get_complete_ms_avg()); } ret.putAll(commonStats); ((Map) ret).put(ACKED, aggregateCounts(acked)); ((Map) ret).put(FAILED, aggregateCounts(failed)); ((Map) ret).put(COMP_LATENCIES, aggregateAverages(completeLatencies, acked)); return ret; }
// Walk every executor in the topology, inspecting only the spout executors.
for (ExecutorSummary exec : info.get_executors()) {
    if ("spout".equals(exec.get_component_id())) {
        // NOTE(review): get_stats() is dereferenced without a null check — confirm the
        // caller guarantees this executor has reported stats.
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        // ":all-time" presumably aggregates counts over the whole topology lifetime — confirm.
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
// Skip executors that have not reported any stats yet.
ExecutorStats executorStats = executorSummary.get_stats();
if (executorStats == null) {
    continue;
// Walk every executor in the topology, inspecting only the spout executors.
for (ExecutorSummary exec : info.get_executors()) {
    if ("spout".equals(exec.get_component_id())) {
        // NOTE(review): get_stats() is dereferenced without a null check — confirm the
        // caller guarantees this executor has reported stats.
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        // ":all-time" presumably aggregates counts over the whole topology lifetime — confirm.
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
// Walk every executor in the topology, inspecting only the spout executors.
for (ExecutorSummary exec : info.get_executors()) {
    if ("spout".equals(exec.get_component_id())) {
        // NOTE(review): get_stats() is dereferenced without a null check — confirm the
        // caller guarantees this executor has reported stats.
        SpoutStats stats = exec.get_stats().get_specific().get_spout();
        // ":all-time" presumably aggregates counts over the whole topology lifetime — confirm.
        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
// Track distinct workers by host+port and count every executor seen.
workers.add(exec.get_host() + exec.get_port());
executors++;
// Only spout executors whose stats chain is fully populated are inspected here.
if (exec.get_stats() != null && exec.get_stats().get_specific() != null
        && exec.get_stats().get_specific().is_set_spout()) {
    SpoutStats stats = exec.get_stats().get_specific().get_spout();
    // ":all-time" presumably aggregates counts over the whole topology lifetime — confirm.
    Map<String, Long> failedMap = stats.get_failed().get(":all-time");
    Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
/** * Compute the capacity of a executor. approximation of the % of time spent doing real work. * @param summary the stats for the executor. * @return the capacity of the executor. */ public static double computeExecutorCapacity(ExecutorSummary summary) { ExecutorStats stats = summary.get_stats(); if (stats == null) { return 0.0; } else { // actual value of m is: Map<String, Map<String/GlobalStreamId, Long/Double>> ({win -> stream -> value}) Map<String, Map> m = aggregateBoltStats(Lists.newArrayList(summary), true); // {metric -> win -> value} ==> {win -> metric -> value} m = swapMapOrder(aggregateBoltStreams(m)); // {metric -> value} Map data = ClientStatsUtil.getMapByKey(m, TEN_MIN_IN_SECONDS_STR); int uptime = summary.get_uptime_secs(); int win = Math.min(uptime, TEN_MIN_IN_SECONDS); long executed = getByKeyOr0(data, EXECUTED).longValue(); double latency = getByKeyOr0(data, EXEC_LATENCIES).doubleValue(); if (win > 0) { return executed * latency / (1000 * win); } return 0.0; } }
List<Map<String, Map<GlobalStreamId, Double>>> executeLatencies = new ArrayList<>(); for (ExecutorSummary summary : statsSeq) { ExecutorStats stat = summary.get_stats(); acked.add(stat.get_specific().get_bolt().get_acked()); failed.add(stat.get_specific().get_bolt().get_failed());
// Count parallelism for the selected component and aggregate its stats when present.
if (_component.equals(es.get_component_id())) {
    componentParallelism++;
    ExecutorStats stats = es.get_stats();
    if (stats != null) {
        // presumably {time window -> stream -> count} — confirm against get_emitted()'s shape
        Map<String, Map<String, Long>> statted =
LOG.debug("Getting uptime for worker {}, {}", key, uptime);
// Walk each time window of the emitted stats: {window -> stream -> count}.
for (Map.Entry<String, Map<String, Long>> statEntry : summary.get_stats().get_emitted().entrySet()) {
    String timeWindow = statEntry.getKey();
    // Default the window length to the full uptime; presumably narrowed for bounded
    // windows below this fragment — confirm.
    long timeSecs = uptime;
/**
 * Thrift-generated style accessor: returns the value identified by {@code field}.
 *
 * @param field which field to fetch
 * @return the current value of that field (boxed for primitives)
 * @throws IllegalStateException if the field id is not recognized
 */
public Object getFieldValue(_Fields field) {
    switch (field) {
        case EXECUTOR_INFO:
            return get_executor_info();
        case COMPONENT_ID:
            return get_component_id();
        case HOST:
            return get_host();
        case PORT:
            return get_port();
        case UPTIME_SECS:
            return get_uptime_secs();
        case STATS:
            return get_stats();
    }
    // Unreachable for valid enum constants; defends against an unknown field id.
    throw new IllegalStateException();
}
// Count parallelism for the selected component and aggregate its stats when present.
if (_component.equals(es.get_component_id())) {
    componentParallelism ++;
    ExecutorStats stats = es.get_stats();
    if (stats != null) {
        // presumably {time window -> stream -> count} — confirm against get_emitted()'s shape
        Map<String,Map<String,Long>> statted =
// Fetch the topology's executor summaries and tally every executor,
// inspecting stats only for those that have reported them.
TopologyInfo info = client.getTopologyInfo(id);
for (ExecutorSummary es: info.get_executors()) {
    ExecutorStats stats = es.get_stats();
    totalExecutors++;
    if (stats != null) {