private Map<Integer, Integer> machineDistribution(TopologyDetails topology) {
    int machineNum = isoMachines.get(topology.getName()).intValue();
    int workerNum = topology.getNumWorkers();
    // Split the requested workers evenly across the isolated machines:
    // key = workers per machine, value = number of machines getting that count.
    TreeMap<Integer, Integer> distribution = Utils.integerDivided(workerNum, machineNum);
    // Drop machines that would receive no workers.
    if (distribution.containsKey(0)) {
        distribution.remove(0);
    }
    return distribution;
}

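The shape of the returned map comes from Utils.integerDivided. A minimal standalone sketch of the even split it is assumed to perform (illustration only, not Storm's actual implementation):

import java.util.TreeMap;

// Illustration only: split `sum` workers across `numPieces` machines as evenly
// as possible. Keys are workers-per-machine, values are how many machines get
// that count, e.g. 10 workers on 3 machines -> {3=2, 4=1}. When sum < numPieces
// the map contains a 0 key, which machineDistribution removes above.
static TreeMap<Integer, Integer> integerDividedSketch(int sum, int numPieces) {
    int base = sum / numPieces;
    int extra = sum % numPieces;               // this many machines get one more worker
    TreeMap<Integer, Integer> result = new TreeMap<>();
    result.put(base, numPieces - extra);
    if (extra != 0) {
        result.put(base + 1, extra);
    }
    return result;
}
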
@Override
public boolean needsScheduling(TopologyDetails topology) {
    int desiredNumWorkers = topology.getNumWorkers();
    int assignedNumWorkers = this.getAssignedNumWorkers(topology);
    return desiredNumWorkers > assignedNumWorkers || getUnassignedExecutors(topology).size() > 0;
}

private Set<Set<ExecutorDetails>> computeWorkerSpecs(TopologyDetails topology) {
    Map<String, List<ExecutorDetails>> compExecutors = Utils.reverseMap(topology.getExecutorToComponent());

    List<ExecutorDetails> allExecutors = new ArrayList<ExecutorDetails>();
    Collection<List<ExecutorDetails>> values = compExecutors.values();
    for (List<ExecutorDetails> eList : values) {
        allExecutors.addAll(eList);
    }

    // Distribute executors round-robin into numWorkers buckets, one bucket per worker slot.
    int numWorkers = topology.getNumWorkers();
    int bucketIndex = 0;
    Map<Integer, Set<ExecutorDetails>> bucketExecutors = new HashMap<Integer, Set<ExecutorDetails>>(numWorkers);
    for (ExecutorDetails executor : allExecutors) {
        Set<ExecutorDetails> executors = bucketExecutors.get(bucketIndex);
        if (executors == null) {
            executors = new HashSet<ExecutorDetails>();
            bucketExecutors.put(bucketIndex, executors);
        }
        executors.add(executor);
        bucketIndex = (bucketIndex + 1) % numWorkers;
    }

    return new HashSet<Set<ExecutorDetails>>(bucketExecutors.values());
}

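The bucketing above is a plain round-robin partition. A self-contained sketch over strings (hypothetical executor ids rather than the real ExecutorDetails type) shows the grouping it produces:

import java.util.*;

// Round-robin partition of items into `numBuckets` groups, mirroring the
// bucketing loop in computeWorkerSpecs. Example data is hypothetical.
public final class RoundRobinSketch {
    static List<Set<String>> partition(List<String> items, int numBuckets) {
        Map<Integer, Set<String>> buckets = new LinkedHashMap<>();
        int index = 0;
        for (String item : items) {
            buckets.computeIfAbsent(index, k -> new LinkedHashSet<>()).add(item);
            index = (index + 1) % numBuckets;
        }
        return new ArrayList<>(buckets.values());
    }

    public static void main(String[] args) {
        // 5 executors over 2 workers -> [[e0, e2, e4], [e1, e3]]
        System.out.println(partition(Arrays.asList("e0", "e1", "e2", "e3", "e4"), 2));
    }
}
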
Set<ExecutorDetails> allExecutors = topology.getExecutors();
Map<WorkerSlot, List<ExecutorDetails>> aliveAssigned = getAliveAssignedWorkerSlotExecutors(cluster, topology.getId());
int totalSlotsToUse = Math.min(topology.getNumWorkers(), availableSlots.size() + aliveAssigned.size());

int slots = td.getNumWorkers();
int assignedSlots = cluster.getAssignedNumWorkers(td);
int tdSlotsNeeded = slots - assignedSlots;

public static void defaultSchedule(Topologies topologies, Cluster cluster) {
    for (TopologyDetails topology : cluster.needsSchedulingTopologies()) {
        List<WorkerSlot> availableSlots = cluster.getAvailableSlots();
        Set<ExecutorDetails> allExecutors = topology.getExecutors();

        Map<WorkerSlot, List<ExecutorDetails>> aliveAssigned =
                EvenScheduler.getAliveAssignedWorkerSlotExecutors(cluster, topology.getId());
        Set<ExecutorDetails> aliveExecutors = new HashSet<ExecutorDetails>();
        for (List<ExecutorDetails> list : aliveAssigned.values()) {
            aliveExecutors.addAll(list);
        }

        Set<WorkerSlot> canReassignSlots = slotsCanReassign(cluster, aliveAssigned.keySet());
        int totalSlotsToUse = Math.min(topology.getNumWorkers(), canReassignSlots.size() + availableSlots.size());

        // Free "bad" slots when the topology could use more slots than it currently
        // holds, or when some of its executors are no longer alive, then let the
        // even scheduler reassign the topology.
        Set<WorkerSlot> badSlots = null;
        if (totalSlotsToUse > aliveAssigned.size() || !allExecutors.equals(aliveExecutors)) {
            badSlots = badSlots(aliveAssigned, allExecutors.size(), totalSlotsToUse);
        }
        if (badSlots != null) {
            cluster.freeSlots(badSlots);
        }

        EvenScheduler.scheduleTopologiesEvenly(new Topologies(topology), cluster);
    }
}

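To make the slot arithmetic concrete, a throwaway example with hypothetical numbers (the variables here are illustrative, not part of the scheduler):

// Hypothetical numbers only: the topology asks for 6 workers, 3 of its current
// slots can be reassigned, and 2 cluster slots are free, so it can use at most
// min(6, 3 + 2) = 5 slots. Since 5 > 3 currently alive assigned slots, the
// scheduler would look for bad slots to free before rescheduling evenly.
int numWorkers = 6;
int canReassign = 3;
int available = 2;
int aliveAssignedCount = 3;
int totalSlotsToUse = Math.min(numWorkers, canReassign + available);  // 5
boolean mustFindBadSlots = totalSlotsToUse > aliveAssignedCount;      // true
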
LOG.debug("Topology {} is not isolated", topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest); int slotsUsed = Node.countSlotsUsed(topId, allNodes);
int origRequest = td.getNumWorkers();
int slotsRequested = Math.min(totalTasks, origRequest);
int slotsUsed = Node.countSlotsUsed(allNodes);

Set<List<Integer>> allExecs = topoToExec.get(id);
Set<List<Integer>> aliveExecs = topoToAliveExecutors.get(id);
int numDesiredWorkers = topo.getNumWorkers();
int numAssignedWorkers = numUsedWorkers(topoToSchedAssignment.get(id));
if (allExecs == null || allExecs.isEmpty() || !allExecs.equals(aliveExecs) || numDesiredWorkers > numAssignedWorkers) {

LOG.debug("Scheduling topology {}", topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest); int slotsUsed = Node.countSlotsUsed(topId, _nodes);
/**
 * Does the topology need scheduling?
 *
 * A topology needs scheduling if one of the following conditions holds:
 * <ul>
 * <li>The topology is assigned slots, but fewer than it desires (i.e. it is squeezed).</li>
 * <li>There are unassigned executors in this topology.</li>
 * </ul>
 */
public boolean needsScheduling(TopologyDetails topology) {
    int desiredNumWorkers = topology.getNumWorkers();
    int assignedNumWorkers = this.getAssignedNumWorkers(topology);
    return desiredNumWorkers > assignedNumWorkers || this.getUnassignedExecutors(topology).size() > 0;
}

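A hedged usage sketch, assuming the method lives on Storm's Cluster as suggested by the snippet; the scheduling loop itself is hypothetical:

// Hypothetical scheduler loop: skip topologies that already have their desired
// workers and no unassigned executors, otherwise hand them to real assignment
// logic. Only needsScheduling is taken from the snippet above.
public void schedule(Topologies topologies, Cluster cluster) {
    for (TopologyDetails topology : topologies.getTopologies()) {
        if (!cluster.needsScheduling(topology)) {
            continue;  // desired workers assigned and every executor has a slot
        }
        // ... custom assignment logic would go here ...
    }
}
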
List<MesosWorkerSlot> mesosWorkerSlots = perTopologySlotList.get(topologyId);
int slotsRequested = topologyDetails.getNumWorkers();
int slotsAssigned = cluster.getAssignedNumWorkers(topologyDetails);
int slotsAvailable = mesosWorkerSlots.size();

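A hedged continuation with hypothetical numbers, showing the shortfall such a check would typically compute; this arithmetic is illustrative, not the project's actual next line:

// Illustrative only: with 4 workers requested, 1 already assigned and 5 Mesos
// slots on offer, the topology still needs min(4 - 1, 5) = 3 more slots.
int slotsRequestedEx = 4;
int slotsAssignedEx = 1;
int slotsAvailableEx = 5;
int slotsStillNeeded = Math.min(slotsRequestedEx - slotsAssignedEx, slotsAvailableEx);  // 3
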
LOG.debug("Topology {} is not isolated",topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest); int slotsUsed = Node.countSlotsUsed(topId, allNodes);
int slotsNeeded = topologyDetails.getNumWorkers();
LOG.debug("Scheduling topology {}",topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest); int slotsUsed = Node.countSlotsUsed(topId, _nodes);