/**
 * Builds the failure result reported when a topology cannot be scheduled due to
 * insufficient cluster resources.
 *
 * @param td the topology that could not be scheduled
 * @return a failure result with status {@code FAIL_NOT_ENOUGH_RESOURCES}
 */
protected SchedulingResult mkNotEnoughResources(TopologyDetails td) {
    int executorCount = td.getExecutors().size();
    String detail = executorCount + " executors not scheduled";
    return SchedulingResult.failure(SchedulingStatus.FAIL_NOT_ENOUGH_RESOURCES, detail);
}
/**
 * Sums the non-shared off-heap memory requested by every executor of this topology.
 * Executors with no off-heap requirement (null) contribute nothing to the total.
 *
 * @return the total requested non-shared off-heap memory
 */
public double getRequestedNonSharedOffHeap() {
    double total = 0.0;
    for (ExecutorDetails executor : this.getExecutors()) {
        Double requirement = getOffHeapMemoryRequirement(executor);
        if (requirement == null) {
            continue;
        }
        total += requirement;
    }
    return total;
}
/**
 * Sums the non-shared on-heap memory requested by every executor of this topology.
 * Executors with no on-heap requirement (null) contribute nothing to the total.
 *
 * @return the total requested non-shared on-heap memory
 */
public double getRequestedNonSharedOnHeap() {
    double total = 0.0;
    for (ExecutorDetails executor : this.getExecutors()) {
        Double requirement = getOnHeapMemoryRequirement(executor);
        if (requirement == null) {
            continue;
        }
        total += requirement;
    }
    return total;
}
/**
 * Computes the total CPU requested across all executors of this topology.
 * Executors without a CPU requirement (null) are skipped.
 *
 * <p>Note: The public API relevant to resource aware scheduling is unstable as of
 * May 2015. We reserve the right to change them.
 *
 * @return the total cpu requested for this topology
 */
public double getTotalRequestedCpu() {
    double total = 0.0;
    for (ExecutorDetails executor : this.getExecutors()) {
        Double cpuRequirement = getTotalCpuReqTask(executor);
        if (cpuRequirement == null) {
            continue;
        }
        total += cpuRequirement;
    }
    return total;
}
/**
 * Maps each still-unscheduled executor of the topology to its component id.
 * Executors already covered by the topology's current assignment are excluded.
 */
@Override
public Map<ExecutorDetails, String> getNeedsSchedulingExecutorToComponents(
        TopologyDetails topology) {
    Collection<ExecutorDetails> unscheduled = new HashSet<>(topology.getExecutors());
    SchedulerAssignment assignment = assignments.get(topology.getId());
    if (assignment != null) {
        // Drop everything that is already assigned; the remainder needs scheduling.
        unscheduled.removeAll(assignment.getExecutors());
    }
    return topology.selectExecutorToComponent(unscheduled);
}
/**
 * Returns the executors of the given topology that are not yet assigned.
 * A null topology yields an empty collection.
 */
@Override
public Collection<ExecutorDetails> getUnassignedExecutors(TopologyDetails topology) {
    if (topology == null) {
        return new ArrayList<>(0);
    }
    Collection<ExecutorDetails> unassigned = new HashSet<>(topology.getExecutors());
    SchedulerAssignment assignment = getAssignmentById(topology.getId());
    if (assignment != null) {
        // Remove everything the current assignment already covers.
        unassigned.removeAll(assignment.getExecutors());
    }
    return unassigned;
}
private static Map<ExecutorDetails, WorkerSlot> scheduleTopology(TopologyDetails topology, Cluster cluster) { List<WorkerSlot> availableSlots = cluster.getAvailableSlots(); Set<ExecutorDetails> allExecutors = topology.getExecutors(); Map<WorkerSlot, List<ExecutorDetails>> aliveAssigned = getAliveAssignedWorkerSlotExecutors(cluster, topology.getId()); int totalSlotsToUse = Math.min(topology.getNumWorkers(), availableSlots.size() + aliveAssigned.size());
for (ExecutorDetails exec : getExecutors()) { if (!resourceList.containsKey(exec)) { LOG.debug(
/**
 * Default scheduling pass: for each topology that needs scheduling, frees slots whose
 * assignment no longer matches the desired layout, then delegates placement to the
 * even scheduler.
 *
 * @param topologies all known topologies (unused directly; iteration is driven by the cluster)
 * @param cluster the cluster to schedule on; mutated by freeing slots and assigning executors
 */
public static void defaultSchedule(Topologies topologies, Cluster cluster) {
    for (TopologyDetails topology : cluster.needsSchedulingTopologies()) {
        List<WorkerSlot> availableSlots = cluster.getAvailableSlots();
        Set<ExecutorDetails> allExecutors = topology.getExecutors();
        Map<WorkerSlot, List<ExecutorDetails>> aliveAssigned =
            EvenScheduler.getAliveAssignedWorkerSlotExecutors(cluster, topology.getId());

        // Collect every executor currently running on an alive slot.
        Set<ExecutorDetails> aliveExecutors = new HashSet<>();
        for (List<ExecutorDetails> slotExecutors : aliveAssigned.values()) {
            aliveExecutors.addAll(slotExecutors);
        }

        Set<WorkerSlot> canReassignSlots = slotsCanReassign(cluster, aliveAssigned.keySet());
        int totalSlotsToUse =
            Math.min(topology.getNumWorkers(), canReassignSlots.size() + availableSlots.size());

        // Rebalance when more slots are usable than currently alive, or when the set of
        // running executors differs from what the topology wants.
        boolean needsRebalance =
            totalSlotsToUse > aliveAssigned.size() || !allExecutors.equals(aliveExecutors);
        if (needsRebalance) {
            Set<WorkerSlot> badSlots = badSlots(aliveAssigned, allExecutors.size(), totalSlotsToUse);
            if (badSlots != null) {
                cluster.freeSlots(badSlots);
            }
        }

        EvenScheduler.scheduleTopologiesEvenly(new Topologies(topology), cluster);
    }
}
Integer effectiveNodesRequested = null; if (nodesRequested != null) { effectiveNodesRequested = Math.min(td.getExecutors().size(), +nodesRequested.intValue());
allNodes.addAll(foundMore); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest);
SchedulingResult.failure( SchedulingStatus.FAIL_NOT_ENOUGH_RESOURCES, (td.getExecutors().size() - unassignedExecutors.size()) + "/" + td.getExecutors().size() + " executors scheduled"); } else {
String topId = td.getId(); LOG.debug("Topology {} is not isolated", topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest);
SchedulingResult.failure( SchedulingStatus.FAIL_NOT_ENOUGH_RESOURCES, (td.getExecutors().size() - unassignedExecutors.size()) + "/" + td.getExecutors().size() + " executors scheduled"); } else {
if (_cluster.needsScheduling(td)) { LOG.debug("Scheduling topology {}", topId); int totalTasks = td.getExecutors().size(); int origRequest = td.getNumWorkers(); int slotsRequested = Math.min(totalTasks, origRequest);
Iterator<ExecutorDetails> executorIterator = topo2.getExecutors().iterator(); List<String> nodeHostnames = rackToNodes.get("rack-1"); for (int i = 0; i< topo2.getExecutors().size()/2; i++) { String nodeHostname = nodeHostnames.get(i % nodeHostnames.size()); RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
Iterator<ExecutorDetails> executorIterator = topo2.getExecutors().iterator(); List<String> nodeHostnames = rackToNodes.get("rack-1"); for (int i = 0; i< topo2.getExecutors().size()/2; i++) { String nodeHostname = nodeHostnames.get(i % nodeHostnames.size()); RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
/**
 * Verifies a user's resource guarantees and pool-utilization calculations after a
 * single topology is assigned to one worker slot.
 */
@Test
public void testResourcePoolUtilization() {
    INimbus iNimbus = new INimbusTest();
    // 4 supervisors, 4 ports each, 100 cpu / 1000 mem per supervisor.
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Double cpuGuarantee = 400.0;
    Double memoryGuarantee = 1000.0;
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(
        userRes("user1", cpuGuarantee, memoryGuarantee));
    Config config = createClusterConfig(100, 200, 200, resourceUserPool);
    TopologyDetails topo1 = genTopology("topo-1", config, 1, 1, 2, 1,
        Time.currentTimeSecs() - 24, 9, "user1");
    Topologies topologies = new Topologies(topo1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()),
        supMap, new HashMap<>(), topologies, config);
    User user1 = new User("user1", toDouble(resourceUserPool.get("user1")));
    // Assign all of topo1's executors onto a single available slot.
    WorkerSlot slot = cluster.getAvailableSlots().get(0);
    cluster.assign(slot, topo1.getId(), topo1.getExecutors());
    // Guarantees should match the configured pool exactly.
    Assert.assertEquals("check cpu resource guarantee", cpuGuarantee,
        user1.getCpuResourceGuaranteed(), 0.001);
    Assert.assertEquals("check memory resource guarantee", memoryGuarantee,
        user1.getMemoryResourceGuaranteed(), 0.001);
    // Utilization: 3 executors at 100 cpu and (200 on-heap + 200 off-heap) mem each,
    // divided by the guarantee — TODO confirm the executor count driving the 3.0 factor.
    Assert.assertEquals("check cpu resource pool utilization",
        ((100.0 * 3.0) / cpuGuarantee),
        user1.getCpuResourcePoolUtilization(cluster), 0.001);
    Assert.assertEquals("check memory resource pool utilization",
        ((200.0 + 200.0) * 3.0) / memoryGuarantee,
        user1.getMemoryResourcePoolUtilization(cluster), 0.001);
}
}
/**
 * Gets the executors of the given topology that still need scheduling, mapped to
 * their component ids.
 *
 * @param topology the topology to inspect
 * @return an executor -&gt; component-id map of executors which need scheduling
 */
public Map<ExecutorDetails, String> getNeedsSchedulingExecutorToComponents(TopologyDetails topology) {
    // Use the diamond operator instead of a raw HashSet to keep type safety
    // (the raw type generated an unchecked-conversion warning).
    Collection<ExecutorDetails> allExecutors = new HashSet<>(topology.getExecutors());
    SchedulerAssignment assignment = this.assignments.get(topology.getId());
    if (assignment != null) {
        // Anything already assigned does not need scheduling.
        Collection<ExecutorDetails> assignedExecutors = assignment.getExecutors();
        allExecutors.removeAll(assignedExecutors);
    }
    return topology.selectExecutorToComponent(allExecutors);
}
/**
 * Gets the unassigned executors of the given topology.
 *
 * @param topology the topology to inspect; may be null
 * @return the executors not covered by the topology's current assignment, or an
 *     empty collection when {@code topology} is null
 */
public Collection<ExecutorDetails> getUnassignedExecutors(TopologyDetails topology) {
    if (topology == null) {
        return new ArrayList<>(0);
    }
    // Diamond operator replaces the raw HashSet/explicit type arguments
    // (raw types generated unchecked-conversion warnings).
    Collection<ExecutorDetails> ret = new HashSet<>(topology.getExecutors());
    SchedulerAssignment assignment = this.getAssignmentById(topology.getId());
    if (assignment != null) {
        Set<ExecutorDetails> assignedExecutors = assignment.getExecutors();
        ret.removeAll(assignedExecutors);
    }
    return ret;
}