// Orders executors by ascending start task id.
// Uses Integer.compare instead of "o1 - o2" subtraction: subtraction can
// overflow when the two start task ids have opposite signs, producing a
// wrong sign and an inconsistent ordering.
@Override
public int compare(ExecutorDetails o1, ExecutorDetails o2) {
    return Integer.compare(o1.getStartTask(), o2.getStartTask());
}
});
// NOTE(review): fragment of a larger assignment-restoring loop — the enclosing
// method (and the closing braces for this if/loop) are outside this view.
// For each executor still alive, maps it to the WorkerSlot resolved from its
// NodeInfo. Assumes exec is a [startTask, endTask] list, so get(0)/get(1)
// are the task-range endpoints — TODO confirm against the caller that builds
// execAndNodePort.
NodeInfo info = execAndNodePort.getValue(); if (aliveExecutors.contains(exec)) { execToSlot.put(new ExecutorDetails(exec.get(0), exec.get(1)), nodePortToSlot.get(info));
/**
 * Convert {topology-id -> SchedulerAssignment} to {topology-id -> {executor [node port]}}.
 *
 * <p>Topologies listed in {@code assignedTopologyIds} that have no scheduler
 * assignment are mapped to {@code null}.
 *
 * @param schedAssignments    scheduler assignments keyed by topology id
 * @param assignedTopologyIds every topology id that should appear in the result
 * @return {topology-id -> {executor [node port]}} mapping
 */
private static Map<String, Map<List<Long>, List<Object>>> computeTopoToExecToNodePort(
        Map<String, SchedulerAssignment> schedAssignments, List<String> assignedTopologyIds) {
    Map<String, Map<List<Long>, List<Object>>> result = new HashMap<>();
    for (Entry<String, SchedulerAssignment> assignmentEntry : schedAssignments.entrySet()) {
        Map<List<Long>, List<Object>> execToNodePort = new HashMap<>();
        for (Entry<ExecutorDetails, WorkerSlot> slotEntry
                : assignmentEntry.getValue().getExecutorToSlot().entrySet()) {
            execToNodePort.put(slotEntry.getKey().toList(), slotEntry.getValue().toList());
        }
        result.put(assignmentEntry.getKey(), execToNodePort);
    }
    // Assigned topologies with no scheduler assignment still get an entry (null).
    for (String topoId : assignedTopologyIds) {
        result.putIfAbsent(topoId, null);
    }
    return result;
}
// NOTE(review): fragment — the enclosing method and the declaration/use of
// executorsStrings are outside this view. Declares the per-worker executor
// grouping list and records a string form of each executor; how
// executorsPerWorkerList is populated is not visible here.
List<List<ExecutorDetails>> executorsPerWorkerList = new ArrayList<>(); for (ExecutorDetails exec : executors) { executorsStrings.add(exec.toString());
/**
 * Build an executor -> component-id map for the given topology, creating the
 * supplied number of executors for every spout and every bolt.
 *
 * <p>Task ranges are laid out sequentially: the n-th executor created covers
 * tasks [n, n+1].
 *
 * @param topology         topology whose spouts and bolts are enumerated
 * @param spoutParallelism executors to create per spout
 * @param boltParallelism  executors to create per bolt
 * @return mapping from each generated ExecutorDetails to its component id
 */
public static Map<ExecutorDetails, String> genExecsAndComps(StormTopology topology, int spoutParallelism, int boltParallelism) {
    Map<ExecutorDetails, String> execToComp = new HashMap<>();
    int taskId = 0;
    // Spouts first, then bolts — task ids keep counting across both.
    for (String spoutId : topology.get_spouts().keySet()) {
        for (int i = 0; i < spoutParallelism; i++, taskId++) {
            execToComp.put(new ExecutorDetails(taskId, taskId + 1), spoutId);
        }
    }
    for (String boltId : topology.get_bolts().keySet()) {
        for (int i = 0; i < boltParallelism; i++, taskId++) {
            execToComp.put(new ExecutorDetails(taskId, taskId + 1), boltId);
        }
    }
    return execToComp;
}
// Sorts executors by ascending start task id.
// Replaces "e1 - e2" subtraction with Integer.compare: subtraction overflows
// for start task ids of opposite signs, which would silently break the sort
// order. (Matches the Integer.compare style used by the other executor
// comparators in this codebase.)
public int compare(ExecutorDetails e1, ExecutorDetails e2) {
    return Integer.compare(e1.getStartTask(), e2.getStartTask());
}
});
/**
 * Load a topology's configuration and structure and assemble a
 * {@link TopologyDetails} for the scheduler.
 *
 * <p>Side effect: if the stored base has no principal set, it is repaired via
 * {@code fixupBase} and persisted back to cluster state before use.
 *
 * @param topoId id of the topology to read
 * @param base   stored base state for the topology (must not be null)
 * @return the assembled TopologyDetails
 * @throws KeyNotFoundException   if the topology's blobs are missing
 * @throws AuthorizationException if the read is not permitted
 * @throws IOException            on I/O failure reading topology state
 * @throws InvalidTopologyException if the stored topology is invalid
 */
private TopologyDetails readTopologyDetails(String topoId, StormBase base) throws KeyNotFoundException,
        AuthorizationException, IOException, InvalidTopologyException {
    assert (base != null);
    assert (topoId != null);

    Map<String, Object> topoConf = readTopoConfAsNimbus(topoId, topoCache);
    StormTopology topo = readStormTopologyAsNimbus(topoId, topoCache);
    // Older bases may lack a principal; repair and persist before proceeding.
    if (!base.is_set_principal()) {
        fixupBase(base, topoConf);
        stormClusterState.updateStorm(topoId, base);
    }
    Map<List<Integer>, String> rawExecToComponent =
        computeExecutorToComponent(topoId, base, topoConf, topo);
    // Re-key from raw [startTask, endTask] lists to ExecutorDetails.
    Map<ExecutorDetails, String> executorsToComponent = new HashMap<>();
    rawExecToComponent.forEach((execRange, component) ->
        executorsToComponent.put(
            new ExecutorDetails(execRange.get(0), execRange.get(1)), component));
    return new TopologyDetails(topoId, topoConf, topo, base.get_num_workers(),
        executorsToComponent, base.get_launch_time_secs(), base.get_owner());
}
// Comparator placing executors in ascending start-task order.
// Integer.compare is overflow-safe, unlike plain subtraction.
@Override
public int compare(ExecutorDetails left, ExecutorDetails right) {
    int leftStart = left.getStartTask();
    int rightStart = right.getStartTask();
    return Integer.compare(leftStart, rightStart);
}
};
/**
 * Build an executor -> component-id map for the given topology, taking each
 * component's executor count from its declared parallelism hint.
 *
 * <p>Each executor covers a single task: the n-th executor created is
 * {@code ExecutorDetails(n, n)}. Spouts are enumerated first, then bolts,
 * with task ids counting continuously across both.
 *
 * @param topology topology whose spouts and bolts are enumerated
 * @return mapping from each generated ExecutorDetails to its component id
 */
public static Map<ExecutorDetails, String> genExecsAndComps(StormTopology topology) {
    Map<ExecutorDetails, String> execToComp = new HashMap<>();
    int taskId = 0;
    for (Map.Entry<String, SpoutSpec> spoutEntry : topology.get_spouts().entrySet()) {
        int parallelism = spoutEntry.getValue().get_common().get_parallelism_hint();
        for (int i = 0; i < parallelism; i++, taskId++) {
            execToComp.put(new ExecutorDetails(taskId, taskId), spoutEntry.getKey());
        }
    }
    for (Map.Entry<String, Bolt> boltEntry : topology.get_bolts().entrySet()) {
        int parallelism = boltEntry.getValue().get_common().get_parallelism_hint();
        for (int i = 0; i < parallelism; i++, taskId++) {
            execToComp.put(new ExecutorDetails(taskId, taskId), boltEntry.getKey());
        }
    }
    return execToComp;
}
// NOTE(review): fragment of a scheduling test — expectedScheduling's
// declaration and the assertions consuming foundScheduling are outside this
// view. Expected worker groupings: the spout alone on one worker, and the
// six bolt executors split across two workers by executor-id parity
// (even ids 2/4/6 together, odd ids 1/3/5 together).
expectedScheduling.add(new HashSet<>(Arrays.asList(new ExecutorDetails(0, 0)))); //Spout expectedScheduling.add(new HashSet<>(Arrays.asList( new ExecutorDetails(2, 2), //bolt-1 new ExecutorDetails(4, 4), //bolt-2 new ExecutorDetails(6, 6)))); //bolt-3 expectedScheduling.add(new HashSet<>(Arrays.asList( new ExecutorDetails(1, 1), //bolt-1 new ExecutorDetails(3, 3), //bolt-2 new ExecutorDetails(5, 5)))); //bolt-3 HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>(); SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
// NOTE(review): fragment — the Arrays.asList(...) wrappers that open the
// second and third executor groups are outside this view, so this line is
// not self-contained Java. The inline comments record each executor's
// resource footprint (memory / CPU / GPU) as declared by the test topology —
// TODO confirm against the full test file.
expectedScheduling.add(new HashSet<>(Arrays.asList(new ExecutorDetails(3, 3)))); //bolt-3 - 500 MB, 50% CPU, 2 GPU new ExecutorDetails(2, 2), //bolt-1 - 500 MB, 50% CPU, 0 GPU new ExecutorDetails(5, 5), //bolt-2 - 500 MB, 50% CPU, 1 GPU new ExecutorDetails(6, 6)))); //bolt-2 - 500 MB, 50% CPU, 1 GPU new ExecutorDetails(0, 0), //Spout - 500 MB, 50% CPU, 0 GPU new ExecutorDetails(1, 1), //bolt-1 - 500 MB, 50% CPU, 0 GPU new ExecutorDetails(4, 4)))); //bolt-3 500 MB, 50% cpu, 2 GPU
// NOTE(review): test-fixture fragment — executorToSlot's declaration and the
// assertions are outside this view. First pass maps three single-task
// executors across two supervisors and builds an assignment; second pass
// re-maps the same keys to model an orphaned worker, a failing worker, and a
// healthy worker, then removes the failing worker's executor. Note that the
// same executorToSlot map is mutated in place and reused for the second
// SchedulerAssignmentImpl, so the first assignment must already have copied
// it (or the first mapping is intentionally discarded) — confirm in the full
// test.
executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("r000s000", 0)); executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("r000s000", 1)); executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("r000s001", 1)); Map<String, SchedulerAssignment> existingAssignments = new HashMap<>(); assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot, null, null); executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("r000s000", 1)); // the worker to orphan executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("r000s000", 2)); // the worker that fails executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("r000s001", 1)); // the healthy worker existingAssignments = new HashMap<>(); assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot, null, null); existingAssignments.put(topology1.getId(), assignment); executorToSlot.remove(new ExecutorDetails(1, 1));
// NOTE(review): test fragment — node/topology1/topology2 and the executors*
// lists are declared outside this view. Assigns two executor groups per
// topology onto successive free slots of the same node, asserting that the
// node's running-topology count increases only when a previously-unseen
// topology is assigned (1 after both topology1 assignments, 2 after both
// topology2 assignments).
executors11.add(new ExecutorDetails(1, 1)); node.assign(node.getFreeSlots().iterator().next(), topology1, executors11); assertEquals(1, node.getRunningTopologies().size()); executors12.add(new ExecutorDetails(2, 2)); node.assign(node.getFreeSlots().iterator().next(), topology1, executors12); assertEquals(1, node.getRunningTopologies().size()); executors21.add(new ExecutorDetails(1, 1)); node.assign(node.getFreeSlots().iterator().next(), topology2, executors21); assertEquals(2, node.getRunningTopologies().size()); executors22.add(new ExecutorDetails(2, 2)); node.assign(node.getFreeSlots().iterator().next(), topology2, executors22); assertEquals(2, node.getRunningTopologies().size());