/**
 * Creates and persists a SERVICE-type request with 5 instances and SEPARATE slave placement.
 *
 * @param requestId id for the new request
 * @return the request instance that was saved
 */
protected SingularityRequest createRequest(String requestId) {
  SingularityRequestBuilder bldr = new SingularityRequestBuilder(requestId, RequestType.SERVICE);
  bldr.setInstances(Optional.of(5));
  bldr.setSlavePlacement(Optional.of(SlavePlacement.SEPARATE));
  // Build once and save that same instance; the original built a second, separate
  // object inside saveRequest(bldr.build()), so the saved and returned objects differed.
  SingularityRequest request = bldr.build();
  saveRequest(request);
  return request;
}
/** GREEDY placement should allow all instances to land on a single slave. */
@Test
public void testSlavePlacementGreedy() {
  initRequest();
  initFirstDeploy();

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(3)).setSlavePlacement(Optional.of(SlavePlacement.GREEDY)));

  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));

  // assertEquals reports expected vs. actual on failure, unlike assertTrue(x == 3).
  Assert.assertEquals(3, taskManager.getActiveTaskIds().size());
}
bldr.setSlavePlacement(Optional.of(SlavePlacement.GREEDY)); SingularityRequest request = bldr.build(); saveRequest(request);
/** A frozen slave must receive no tasks, and should resume receiving them once unfrozen. */
@Test
public void testUnfrozenSlaveLaunchesTasks() {
  initRequest();
  initFirstDeploy();
  resourceOffers();

  // Freeze slave1 so the scheduler is forced to place everything elsewhere.
  Assert.assertEquals(StateChangeResult.SUCCESS,
      slaveManager.changeState("slave1", MachineState.FROZEN, Optional.absent(), Optional.of("user1")));

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(2)).setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)));
  resourceOffers();

  List<SingularityTaskId> activeIds = taskManager.getActiveTaskIds();
  Assert.assertEquals(0, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave1").get()).size());
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave2").get()).size());

  // Unfreeze slave1; the still-pending second instance should now launch there.
  Assert.assertEquals(StateChangeResult.SUCCESS,
      slaveManager.changeState("slave1", MachineState.ACTIVE, Optional.absent(), Optional.of("user1")));
  resourceOffers();

  activeIds = taskManager.getActiveTaskIds();
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave1").get()).size());
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave2").get()).size());
}
@Test public void testSlavePlacementOptimistic() { initRequest(); initFirstDeploy(); saveAndSchedule(request.toBuilder().setInstances(Optional.of(20)).setSlavePlacement(Optional.of(SlavePlacement.OPTIMISTIC))); // Default behavior if we don't have info about other hosts that can run this task: be greedy. sms.resourceOffers(Arrays.asList(createOffer(2, 128 * 2, 1024 * 2, "slave1", "host1"))); Assert.assertEquals(2, taskManager.getActiveTaskIds().size()); // Now that at least one other host is running tasks for this request, we expect an even-ish spread, // but because we have many tasks pending, we allow quite a bit of unevenness. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); Assert.assertEquals(13, taskManager.getActiveTaskIds().size()); // ...but now we won't schedule more tasks on host2, because it's hosting a disproportionate number of tasks. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); Assert.assertEquals(13, taskManager.getActiveTaskIds().size()); // ...but since host1 is only hosting 2 tasks, we will schedule more tasks on it when an offer is received. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1"))); Assert.assertEquals(20, taskManager.getActiveTaskIds().size()); Map<String, List<SingularityTaskId>> tasksByHost = taskManager.getActiveTaskIdsForRequest(request.getId()).stream() .collect(Collectors.groupingBy(SingularityTaskId::getSanitizedHost)); Assert.assertNotNull(tasksByHost.get("host1")); Assert.assertEquals(9, tasksByHost.get("host1").size()); Assert.assertNotNull(tasksByHost.get("host2")); Assert.assertEquals(11, tasksByHost.get("host2").size()); }
/** SEPARATE placement: at most one instance of the request per host. */
@Test
public void testSlavePlacementSeparate() {
  initRequest();
  initFirstDeploy();

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(2)).setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)));

  // Two offers from the same host: only one task may launch; the other stays pending.
  // assertEquals (instead of assertTrue(x == n)) reports expected vs. actual on failure.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1"), createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // A further offer from the same host is still refused.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // Even while the existing task is only TASK_CLEANING (not yet gone), host1 counts as occupied.
  eventListener.taskHistoryUpdateEvent(new SingularityTaskHistoryUpdate(taskManager.getActiveTaskIds().get(0), System.currentTimeMillis(), ExtendedTaskState.TASK_CLEANING, Optional.<String>absent(), Optional.<String>absent()));
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // An offer from a different host finally places the second instance.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2")));
  Assert.assertTrue(taskManager.getPendingTaskIds().isEmpty());
  Assert.assertEquals(2, taskManager.getActiveTaskIds().size());
}
/** A rescinded cached offer must not be used; the surviving cached offer plus fresh offers complete the request. */
@Test
public void testOfferCache() {
  configuration.setCacheOffers(true);
  configuration.setOfferCacheSize(2);

  List<Offer> cachedOffers = resourceOffers();
  // Rescind one of the two cached offers before any request exists.
  sms.rescind(cachedOffers.get(0).getId());

  initRequest();
  initFirstDeploy();

  SingularityRequest updatedRequest = request.toBuilder()
      .setSlavePlacement(Optional.of(SlavePlacement.SEPARATE))
      .setInstances(Optional.of(2))
      .build();
  requestResource.postRequest(updatedRequest, singularityUser);

  // Only the surviving cached offer is usable on the first poll.
  schedulerPoller.runActionOnPoll();
  Assert.assertEquals(1, taskManager.getActiveTasks().size());

  // Fresh offers let the second instance launch.
  resourceOffers();
  Assert.assertEquals(2, taskManager.getActiveTasks().size());
}
.setInstances(Optional.of(2)) .setRackSensitive(Optional.of(true)) .setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)) .setAllowBounceToSameHost(Optional.of(true)) .build();
@Test public void testSlavePlacementSpread() { initRequest(); initFirstDeploy(); saveAndSchedule(request.toBuilder().setInstances(Optional.of(1)).setSlavePlacement(Optional.of(SlavePlacement.SPREAD_ALL_SLAVES))); sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1", Optional.of("rack1")))); // assert one Request on one slave. Assert.assertTrue(slaveManager.getNumObjectsAtState(MachineState.ACTIVE) == 1); Assert.assertTrue(taskManager.getPendingTaskIds().size() == 0); Assert.assertTrue(taskManager.getActiveTaskIds().size() == 1); sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); Assert.assertTrue(slaveManager.getNumObjectsAtState(MachineState.ACTIVE) == 2); spreadAllPoller.runActionOnPoll(); scheduler.drainPendingQueue(); sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); // assert Request is spread over the two slaves Assert.assertTrue(taskManager.getPendingTaskIds().size() == 0); Assert.assertTrue(taskManager.getActiveTaskIds().size() == 2); Assert.assertEquals(1, taskManager.getTasksOnSlave(taskManager.getActiveTaskIds(), slaveManager.getObject("slave1").get()).size()); Assert.assertEquals(1, taskManager.getTasksOnSlave(taskManager.getActiveTaskIds(), slaveManager.getObject("slave2").get()).size()); // decommission slave and kill task slaveManager.changeState("slave2", MachineState.FROZEN, Optional.<String>absent(), Optional.<String>absent()); slaveManager.changeState("slave2", MachineState.STARTING_DECOMMISSION, Optional.<String>absent(), Optional.<String>absent()); cleaner.drainCleanupQueue(); statusUpdate(taskManager.getTasksOnSlave(taskManager.getActiveTaskIds(), slaveManager.getObject("slave2").get()).get(0), TaskState.TASK_KILLED); spreadAllPoller.runActionOnPoll(); scheduler.drainPendingQueue(); Assert.assertTrue(taskManager.getPendingTaskIds().isEmpty()); Assert.assertTrue(taskManager.getActiveTaskIds().size() == 1); }
@Test public void testOfferCacheRescindOffers() { configuration.setCacheOffers(true); configuration.setOfferCacheSize(2); List<Offer> offers2 = resourceOffers(); // cached as well sms.rescind(offers2.get(0).getId()); sms.rescind(offers2.get(1).getId()); initRequest(); initFirstDeploy(); requestResource.postRequest(request.toBuilder().setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)).setInstances(Optional.of(2)).build(), singularityUser); schedulerPoller.runActionOnPoll(); Assert.assertEquals(0, taskManager.getActiveTasks().size()); resourceOffers(); int numTasks = taskManager.getActiveTasks().size(); Assert.assertEquals(2, numTasks); startAndDeploySecondRequest(); schedulerPoller.runActionOnPoll(); Assert.assertEquals(numTasks, taskManager.getActiveTasks().size()); resourceOffers(); Assert.assertTrue(taskManager.getActiveTasks().size() > numTasks); }
.setSlavePlacement(Optional.of(SlavePlacement.SEPARATE_BY_REQUEST)) .setInstances(Optional.of(2)).build(), singularityUser );
initFirstDeploy(); saveAndSchedule(request.toBuilder().setSlavePlacement(Optional.of(SlavePlacement.GREEDY)).setInstances(Optional.of(2)));
.setRackAffinity(copyOfList(rackAffinity)) .setWaitAtLeastMillisAfterTaskFinishesForReschedule(waitAtLeastMillisAfterTaskFinishesForReschedule) .setSlavePlacement(slavePlacement) .setRequiredSlaveAttributes(requiredSlaveAttributes) .setAllowedSlaveAttributes(allowedSlaveAttributes)
/**
 * Creates and persists a SERVICE-type request with 5 instances and SEPARATE slave placement.
 *
 * @param requestId id for the new request
 * @return the request instance that was saved
 */
protected SingularityRequest createRequest(String requestId) {
  SingularityRequestBuilder bldr = new SingularityRequestBuilder(requestId, RequestType.SERVICE);
  bldr.setInstances(Optional.of(5));
  bldr.setSlavePlacement(Optional.of(SlavePlacement.SEPARATE));
  // Build once and save that same instance; the original built a second, separate
  // object inside saveRequest(bldr.build()), so the saved and returned objects differed.
  SingularityRequest request = bldr.build();
  saveRequest(request);
  return request;
}
/** GREEDY placement should allow all instances to land on a single slave. */
@Test
public void testSlavePlacementGreedy() {
  initRequest();
  initFirstDeploy();

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(3)).setSlavePlacement(Optional.of(SlavePlacement.GREEDY)));

  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));

  // assertEquals reports expected vs. actual on failure, unlike assertTrue(x == 3).
  Assert.assertEquals(3, taskManager.getActiveTaskIds().size());
}
/** A frozen slave must receive no tasks, and should resume receiving them once unfrozen. */
@Test
public void testUnfrozenSlaveLaunchesTasks() {
  initRequest();
  initFirstDeploy();
  resourceOffers();

  // Freeze slave1 so the scheduler is forced to place everything elsewhere.
  Assert.assertEquals(StateChangeResult.SUCCESS,
      slaveManager.changeState("slave1", MachineState.FROZEN, Optional.absent(), Optional.of("user1")));

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(2)).setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)));
  resourceOffers();

  List<SingularityTaskId> activeIds = taskManager.getActiveTaskIds();
  Assert.assertEquals(0, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave1").get()).size());
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave2").get()).size());

  // Unfreeze slave1; the still-pending second instance should now launch there.
  Assert.assertEquals(StateChangeResult.SUCCESS,
      slaveManager.changeState("slave1", MachineState.ACTIVE, Optional.absent(), Optional.of("user1")));
  resourceOffers();

  activeIds = taskManager.getActiveTaskIds();
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave1").get()).size());
  Assert.assertEquals(1, taskManager.getTasksOnSlave(activeIds, slaveManager.getObject("slave2").get()).size());
}
@Test public void testSlavePlacementOptimistic() { initRequest(); initFirstDeploy(); saveAndSchedule(request.toBuilder().setInstances(Optional.of(20)).setSlavePlacement(Optional.of(SlavePlacement.OPTIMISTIC))); // Default behavior if we don't have info about other hosts that can run this task: be greedy. sms.resourceOffers(Arrays.asList(createOffer(2, 128 * 2, 1024 * 2, "slave1", "host1"))); Assert.assertEquals(2, taskManager.getActiveTaskIds().size()); // Now that at least one other host is running tasks for this request, we expect an even-ish spread, // but because we have many tasks pending, we allow quite a bit of unevenness. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); Assert.assertEquals(13, taskManager.getActiveTaskIds().size()); // ...but now we won't schedule more tasks on host2, because it's hosting a disproportionate number of tasks. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2"))); Assert.assertEquals(13, taskManager.getActiveTaskIds().size()); // ...but since host1 is only hosting 2 tasks, we will schedule more tasks on it when an offer is received. sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1"))); Assert.assertEquals(20, taskManager.getActiveTaskIds().size()); Map<String, List<SingularityTaskId>> tasksByHost = taskManager.getActiveTaskIdsForRequest(request.getId()).stream() .collect(Collectors.groupingBy(SingularityTaskId::getSanitizedHost)); Assert.assertNotNull(tasksByHost.get("host1")); Assert.assertEquals(9, tasksByHost.get("host1").size()); Assert.assertNotNull(tasksByHost.get("host2")); Assert.assertEquals(11, tasksByHost.get("host2").size()); }
/** SEPARATE placement: at most one instance of the request per host. */
@Test
public void testSlavePlacementSeparate() {
  initRequest();
  initFirstDeploy();

  saveAndSchedule(request.toBuilder().setInstances(Optional.of(2)).setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)));

  // Two offers from the same host: only one task may launch; the other stays pending.
  // assertEquals (instead of assertTrue(x == n)) reports expected vs. actual on failure.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1"), createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // A further offer from the same host is still refused.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // Even while the existing task is only TASK_CLEANING (not yet gone), host1 counts as occupied.
  eventListener.taskHistoryUpdateEvent(new SingularityTaskHistoryUpdate(taskManager.getActiveTaskIds().get(0), System.currentTimeMillis(), ExtendedTaskState.TASK_CLEANING, Optional.<String>absent(), Optional.<String>absent()));
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave1", "host1")));
  Assert.assertEquals(1, taskManager.getPendingTaskIds().size());
  Assert.assertEquals(1, taskManager.getActiveTaskIds().size());

  // An offer from a different host finally places the second instance.
  sms.resourceOffers(Arrays.asList(createOffer(20, 20000, 50000, "slave2", "host2")));
  Assert.assertTrue(taskManager.getPendingTaskIds().isEmpty());
  Assert.assertEquals(2, taskManager.getActiveTaskIds().size());
}
/** A rescinded cached offer must not be used; the surviving cached offer plus fresh offers complete the request. */
@Test
public void testOfferCache() {
  configuration.setCacheOffers(true);
  configuration.setOfferCacheSize(2);

  List<Offer> cachedOffers = resourceOffers();
  // Rescind one of the two cached offers before any request exists.
  sms.rescind(cachedOffers.get(0).getId());

  initRequest();
  initFirstDeploy();

  SingularityRequest updatedRequest = request.toBuilder()
      .setSlavePlacement(Optional.of(SlavePlacement.SEPARATE))
      .setInstances(Optional.of(2))
      .build();
  requestResource.postRequest(updatedRequest, singularityUser);

  // Only the surviving cached offer is usable on the first poll.
  schedulerPoller.runActionOnPoll();
  Assert.assertEquals(1, taskManager.getActiveTasks().size());

  // Fresh offers let the second instance launch.
  resourceOffers();
  Assert.assertEquals(2, taskManager.getActiveTasks().size());
}
@Test public void testOfferCacheRescindOffers() { configuration.setCacheOffers(true); configuration.setOfferCacheSize(2); List<Offer> offers2 = resourceOffers(); // cached as well sms.rescind(offers2.get(0).getId()); sms.rescind(offers2.get(1).getId()); initRequest(); initFirstDeploy(); requestResource.postRequest(request.toBuilder().setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)).setInstances(Optional.of(2)).build(), singularityUser); schedulerPoller.runActionOnPoll(); Assert.assertEquals(0, taskManager.getActiveTasks().size()); resourceOffers(); int numTasks = taskManager.getActiveTasks().size(); Assert.assertEquals(2, numTasks); startAndDeploySecondRequest(); schedulerPoller.runActionOnPoll(); Assert.assertEquals(numTasks, taskManager.getActiveTasks().size()); resourceOffers(); Assert.assertTrue(taskManager.getActiveTasks().size() > numTasks); }