/**
 * Builds a copy of {@code target} whose disk resources are replaced by the disk
 * resources carried by {@code source}; all of {@code target}'s non-disk
 * resources are preserved.
 */
public static TaskInfo copyVolumes(
        final TaskInfo source, final TaskInfo target) {
    // Disk resources to carry over from the source task.
    List<Resource> carriedDisks = source.getResourcesList().stream()
            .filter(r -> isDisk(r) && r.hasDisk())
            .collect(Collectors.toList());
    // Everything except disks is retained from the target task.
    List<Resource> retainedNonDisks = target.getResourcesList().stream()
            .filter(r -> !isDisk(r))
            .collect(Collectors.toList());

    TaskInfo.Builder rebuilt = TaskInfo.newBuilder(target).clearResources();
    rebuilt.addAllResources(retainedNonDisks);
    rebuilt.addAllResources(carriedDisks);
    return rebuilt.build();
}
/**
 * Collects every port contained in the "ports" resources of the given task
 * and returns an iterator over them, in the order they appear in the
 * resource ranges.
 *
 * @param taskInfo the task whose "ports" resources are scanned
 * @return an iterator over all declared ports (empty if the task has none)
 */
private Iterator<Long> getPortMappingIterator(TaskInfo taskInfo) {
    List<Long> ports = new ArrayList<>();
    for (Resource resource : taskInfo.getResourcesList()) {
        // Only "ports" resources contribute; skip cpus/mem/disk/etc.
        if (!"ports".equals(resource.getName())) {
            continue;
        }
        for (Range range : resource.getRanges().getRangeList()) {
            // Iterate with a long counter: the original int counter could
            // overflow if a range span exceeded Integer.MAX_VALUE.
            for (long port = range.getBegin(); port <= range.getEnd(); port++) {
                ports.add(port);
            }
        }
    }
    return ports.iterator();
}
/**
 * Pretty-print mesos protobuf TaskInfo.
 * <p/>
 * XXX(erikdw): not including command, container (+data), nor health_check.
 */
public static String taskInfoToString(TaskInfo task) {
    // LinkedHashMap keeps the fields in a stable display order.
    Map<String, String> fields = new LinkedHashMap<>();
    fields.put("task_id", task.getTaskId().getValue());
    fields.put("slave_id", task.getSlaveId().getValue());
    // Resource entries are flattened into the same ordered map.
    fields.putAll(resourcesToOrderedMap(task.getResourcesList()));
    fields.put("executor_id", task.getExecutor().getExecutorId().getValue());
    return JSONValue.toJSONString(fields);
}
@Test
public void testLaunchExpectedMultiplePorts() throws Exception {
    // Launch for the first time: get ports 10000,10001
    PodInstanceRequirement podInstanceRequirement =
            PodInstanceRequirementTestUtils.getPortRequirement(10000, 10001);
    List<Resource> reserveResources = recordLaunchWithCompleteOfferedResources(
            podInstanceRequirement,
            ResourceTestUtils.getUnreservedPorts(10000, 10001));
    // NOTE(review): 5 appears to be executor resources plus the two reserved
    // ports at indexes 3 and 4 — confirm against the record helper.
    Assert.assertEquals(reserveResources.toString(), 5, reserveResources.size());

    // Capture the resource ids assigned to each reserved port.
    String resourceId0 = getResourceId(reserveResources.get(3));
    String resourceId1 = getResourceId(reserveResources.get(4));
    Collection<Resource> expectedResources = getExpectedExecutorResources(
            stateStore.fetchTasks().iterator().next().getExecutor());
    expectedResources.addAll(Arrays.asList(
            ResourceTestUtils.getReservedPorts(10000, 10000, resourceId0),
            ResourceTestUtils.getReservedPorts(10001, 10001, resourceId1)));

    // Now try relaunch:
    List<OfferRecommendation> recommendations = evaluator.evaluate(
            PodInstanceRequirementTestUtils.getPortRequirement(10000, 10001),
            Arrays.asList(OfferTestUtils.getOffer(expectedResources)));
    Assert.assertEquals(2, recommendations.size());

    // Validate LAUNCH Operation
    Operation launchOperation = recommendations.get(0).getOperation().get();
    Assert.assertEquals(Operation.Type.LAUNCH_GROUP, launchOperation.getType());

    // Relaunch should reuse the previously reserved port resource ids.
    List<Resource> launchResources =
            launchOperation.getLaunchGroup().getTaskGroup().getTasks(0).getResourcesList();
    Assert.assertEquals(launchResources.toString(), 2, launchResources.size());
    Assert.assertEquals(resourceId0, getResourceId(launchResources.get(0)));
    Assert.assertEquals(resourceId1, getResourceId(launchResources.get(1)));
    // Second recommendation carries no operation.
    Assert.assertFalse(recommendations.get(1).getOperation().isPresent());
}
@Test
public void testGetUpdateOfferRequirement() throws Exception {
    // Build a new offer requirement for the daemon task type.
    OfferRequirement requirement = provider.getNewOfferRequirement(
            CassandraTask.TYPE.CASSANDRA_DAEMON.name(), testTaskInfo);
    Protos.TaskInfo taskInfo =
            requirement.getTaskRequirements().iterator().next().getTaskInfo();
    Assert.assertEquals(taskInfo.getName(), "test-daemon");
    // The task id embeds the task name.
    Assert.assertTrue(taskInfo.getTaskId().getValue().contains("test-daemon"));
    // No agent assigned yet.
    Assert.assertEquals("", taskInfo.getSlaveId().getValue());

    // Expect exactly cpus, mem, disk and ports, in that order.
    List<Protos.Resource> resources = taskInfo.getResourcesList();
    Assert.assertEquals(4, resources.size());

    Protos.Resource cpusResource = resources.get(0);
    Assert.assertEquals("cpus", cpusResource.getName());
    Assert.assertEquals(testCpus, cpusResource.getScalar().getValue(), 0.0);

    Protos.Resource memResource = resources.get(1);
    Assert.assertEquals("mem", memResource.getName());
    Assert.assertEquals(testMem, memResource.getScalar().getValue(), 0.0);

    Protos.Resource diskResource = resources.get(2);
    Assert.assertEquals("disk", diskResource.getName());
    Assert.assertEquals(testDisk, diskResource.getScalar().getValue(), 0.0);

    // Ports are dynamically assigned, so only a lower bound is checked.
    Protos.Resource portsResource = resources.get(3);
    Assert.assertEquals("ports", portsResource.getName());
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getBegin() >= testPortBegin);
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getEnd() >= testPortBegin);
}
// Task id embeds the cluster name, host, and the fixed test timestamp.
assertEquals("elasticsearch_localhost_19700101T010203.400Z", taskInfo.getTaskId().getValue());
List<Protos.Resource> resourceList = taskInfo.getResourcesList();
// CPU resource must match the configured value within floating-point tolerance.
assertEquals(configuration.getCpus(), getResourceByName(resourceList, "cpus").getScalar().getValue(), EPSILON);
// No agent assigned yet, and exactly four resources are expected.
Assert.assertEquals("", taskInfo.getSlaveId().getValue());
List<Protos.Resource> resources = taskInfo.getResourcesList();
Assert.assertEquals(4, resources.size());
/**
 * This method keeps the resources associated with tasks in the state store up to date, when a task which shares
 * their resource-set is launched.
 *
 * @param podInstance the parent pod associated with the task being launched
 * @param taskInfo the task being launched
 */
@VisibleForTesting
void updateTaskResourcesWithinResourceSet(PodInstance podInstance, Protos.TaskInfo taskInfo) {
    Optional<TaskSpec> taskSpec = TaskUtils.getTaskSpec(podInstance, taskInfo.getName());
    if (!taskSpec.isPresent()) {
        // No spec for this task name within the pod: nothing to sync.
        return;
    }

    // Update any other TaskInfos in this resource set to have the same resources:
    Collection<Protos.TaskInfo> taskInfosWithSameResourceSet =
            getOtherTasksInResourceSet(podInstance, taskSpec.get());
    stateStore.storeTasks(updateTasksWithResources(
            taskInfosWithSameResourceSet,
            taskInfo.getResourcesList(),
            // Executor resources are only propagated when an executor is present.
            taskInfo.hasExecutor()
                    ? Optional.of(taskInfo.getExecutor().getResourcesList())
                    : Optional.empty()));
}
/**
 * Creates an offer on the task's current agent that contains the task's
 * resources, its executor's resources, the template task's resources, plus
 * additional unreserved cpu/mem/disk headroom for the update.
 */
public static Protos.Offer generateUpdateOffer(
        String frameworkId,
        Protos.TaskInfo taskInfo,
        Protos.TaskInfo templateTaskInfo,
        double cpu,
        int memory,
        int disk) {
    Protos.Offer.Builder offer = Protos.Offer.newBuilder()
            .setId(Protos.OfferID.newBuilder().setValue(UUID.randomUUID().toString()))
            .setFrameworkId(Protos.FrameworkID.newBuilder().setValue(frameworkId))
            .setSlaveId(Protos.SlaveID.newBuilder().setValue(taskInfo.getSlaveId().getValue()))
            .setHostname("127.0.0.1");
    // Existing reservations from the task, its executor, and the template.
    offer.addAllResources(taskInfo.getResourcesList());
    offer.addAllResources(taskInfo.getExecutor().getResourcesList());
    offer.addAllResources(templateTaskInfo.getResourcesList());
    // Fresh unreserved headroom for the resize.
    offer.addResources(ResourceUtils.getUnreservedScalar("cpus", cpu));
    offer.addResources(ResourceUtils.getUnreservedScalar("mem", memory));
    offer.addResources(ResourceUtils.getUnreservedScalar("disk", disk));
    return offer.build();
}
@Test
public void testUnexpectedPermanentlyFailedResources() throws Exception {
    install();
    StateStore stateStore = new StateStore(persister);

    // Pick an arbitrary task with resources:
    Protos.TaskInfo taskInfo = stateStore.fetchTasks().iterator().next();
    Assert.assertFalse(taskInfo.getResourcesList().isEmpty());

    // Verify that the task's resources are currently expected:
    UnexpectedResourcesResponse response =
            defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList()));
    Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result);
    Assert.assertTrue(response.offerResources.isEmpty());

    // Mark the task as permanently failed:
    stateStore.storeTasks(Collections.singletonList(
            taskInfo.toBuilder()
                    .setLabels(new TaskLabelWriter(taskInfo).setPermanentlyFailed().toProto())
                    .build()));

    // Verify that the task's resources are no longer expected:
    response = defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList()));
    Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result);
    // All of the failed task's resources should now be flagged for release.
    Assert.assertEquals(1, response.offerResources.size());
    Assert.assertEquals(taskInfo.getResourcesList(),
            response.offerResources.iterator().next().getResources());
}
// Returns a new daemon task rebuilt against the given target configuration:
// new task id, updated executor, refreshed cpu/mem resources, and a
// "config_target" label recording the target config UUID.
public CassandraDaemonTask updateConfig(CassandraConfig cassandraConfig, ExecutorConfig executorConfig, UUID targetConfigName) {
    LOGGER.info("Updating config for task: {} to config: {}", getTaskInfo().getName(), targetConfigName.toString());
    // Label linking this task to the target configuration id.
    final Protos.Label label = Protos.Label.newBuilder()
            .setKey("config_target")
            .setValue(targetConfigName.toString())
            .build();
    return new CassandraDaemonTask(getBuilder()
            .setExecutor(getExecutor().update(executorConfig).getExecutorInfo())
            // A new task id is generated for the relaunch.
            .setTaskId(createId(getName()))
            .setData(getData().withNewConfig(cassandraConfig).getBytes())
            // Replace cpu/mem values while keeping the remaining resources.
            .clearResources()
            .addAllResources(TaskUtils.updateResources(
                    cassandraConfig.getCpus(),
                    cassandraConfig.getMemoryMb(),
                    getTaskInfo().getResourcesList()
            ))
            // Existing labels are dropped; only the config target label remains.
            .clearLabels()
            .setLabels(Protos.Labels.newBuilder().addLabels(label).build()).build());
}
@Test
public void testUnexpectedDecommissioningResources() throws Exception {
    install();
    StateStore stateStore = new StateStore(persister);

    // Pick an arbitrary task with resources:
    Protos.TaskInfo taskInfo = stateStore.fetchTasks().iterator().next();
    Assert.assertFalse(taskInfo.getResourcesList().isEmpty());

    // Verify that the task's resources are currently expected:
    UnexpectedResourcesResponse response =
            defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList()));
    Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result);
    Assert.assertTrue(response.offerResources.isEmpty());

    // Mark the task as decommissioning:
    stateStore.storeGoalOverrideStatus(taskInfo.getName(), DecommissionPlanFactory.DECOMMISSIONING_STATUS);

    // Verify that the task's resources are no longer expected:
    response = defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList()));
    Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result);
    // All of the decommissioning task's resources should be flagged for release.
    Assert.assertEquals(1, response.offerResources.size());
    Assert.assertEquals(taskInfo.getResourcesList(),
            response.offerResources.iterator().next().getResources());
    // Note: The task's resources are still present in the state store, because we didn't set up a decommission plan
}
// Builds a mapper that reconciles a task's current resources against the
// resource specs (and volumes) declared by its resource set.
TaskResourceMapper(
        Collection<String> taskSpecNames,
        ResourceSet resourceSet,
        Protos.TaskInfo taskInfo,
        Optional<String> resourceNamespace) {
    logger = LoggingUtils.getLogger(getClass(), resourceNamespace);
    this.resourceNamespace = resourceNamespace;
    // Multiple tasks may share a resource set. When a resource set is updated, we want to ensure that all tasks
    // attached to the resource set receive the update.
    this.taskSpecNames = taskSpecNames;
    // Specs to match against: plain resources first, then volumes.
    this.resourceSpecs = new ArrayList<>();
    this.resourceSpecs.addAll(resourceSet.getResources());
    this.resourceSpecs.addAll(resourceSet.getVolumes());
    this.taskPortFinder = new TaskPortLookup(taskInfo);
    this.resources = taskInfo.getResourcesList();

    // ONLY call this AFTER initializing all members above:
    this.evaluationStages = getEvaluationStagesInternal();
}
/**
 * Creates an offer on the task's current agent containing the task's
 * resources, its executor's resources, and the template task's resources —
 * i.e. exactly what a replacement launch would need.
 */
public static Protos.Offer generateReplacementOffer(
        String frameworkId,
        Protos.TaskInfo taskInfo,
        Protos.TaskInfo templateTaskInfo) {
    Protos.Offer.Builder offer = Protos.Offer.newBuilder()
            .setId(Protos.OfferID.newBuilder().setValue(UUID.randomUUID().toString()))
            .setFrameworkId(Protos.FrameworkID.newBuilder().setValue(frameworkId))
            .setSlaveId(Protos.SlaveID.newBuilder().setValue(taskInfo.getSlaveId().getValue()))
            .setHostname("127.0.0.1");
    // Existing reservations from the task, its executor, and the template.
    offer.addAllResources(taskInfo.getResourcesList());
    offer.addAllResources(taskInfo.getExecutor().getResourcesList());
    offer.addAllResources(templateTaskInfo.getResourcesList());
    return offer.build();
}
@Test
public void testCreateCassandraContainer() throws Exception {
    CassandraContainer container = getTestCassandraContainer();
    Collection<Protos.TaskInfo> taskInfos = container.getTaskInfos();
    // The container holds the daemon task followed by its cluster template task.
    Assert.assertEquals(2, taskInfos.size());

    Iterator<Protos.TaskInfo> iter = taskInfos.iterator();
    Protos.TaskInfo daemonTaskInfo = iter.next();
    Protos.TaskInfo clusterTemplateTaskInfo = iter.next();
    validateDaemonTaskInfo(daemonTaskInfo);

    // Template task name is derived from the daemon task name.
    Assert.assertEquals(CassandraTemplateTask.toTemplateTaskName(daemonTaskInfo.getName()),
            clusterTemplateTaskInfo.getName());
    Assert.assertEquals(2, clusterTemplateTaskInfo.getResourcesCount());
    // The template is never launched, so it has no task id...
    Assert.assertTrue(clusterTemplateTaskInfo.getTaskId().getValue().isEmpty());
    // ...and none of its resources carry a reservation id yet.
    for (Protos.Resource resource : clusterTemplateTaskInfo.getResourcesList()) {
        Assert.assertTrue(ResourceUtils.getResourceId(resource).isEmpty());
    }
}
/** * Returns a list of all the resources associated with a task, including {@link Executor} resources. * * @param taskInfo The {@link Protos.TaskInfo} containing the {@link Protos.Resource}. * @return a list of {@link Protos.Resource}s. */ public static List<Protos.Resource> getAllResources(Protos.TaskInfo taskInfo) { // Get all resources from both the task level and the executor level List<Protos.Resource> resources = new ArrayList<>(taskInfo.getResourcesList()); if (taskInfo.hasExecutor()) { resources.addAll(taskInfo.getExecutor().getResourcesList()); } return resources; }
// Shared assertions for a freshly-created (not yet launched) daemon task.
private void validateDaemonTaskInfo(Protos.TaskInfo daemonTaskInfo) throws TaskException {
    Assert.assertEquals(testDaemonName, daemonTaskInfo.getName());
    Assert.assertEquals(4, daemonTaskInfo.getResourcesCount());
    // Task name must be recoverable from the task id.
    Assert.assertEquals(testDaemonName, TaskUtils.toTaskName(daemonTaskInfo.getTaskId()));
    // Not yet placed on an agent.
    Assert.assertTrue(daemonTaskInfo.getSlaveId().getValue().isEmpty());

    // No reservations have been made yet, so resource ids are empty.
    for (Protos.Resource resource : daemonTaskInfo.getResourcesList()) {
        Assert.assertTrue(ResourceUtils.getResourceId(resource).isEmpty());
    }
}
// Extracts the combined task + executor resources from the first LAUNCH_GROUP
// recommendation found; returns an empty list if there is none.
private static List<Protos.Resource> getExpectedResources(Collection<OfferRecommendation> operations) {
    for (OfferRecommendation operation : operations) {
        if (operation.getOperation().get().getType().equals(Offer.Operation.Type.LAUNCH_GROUP)) {
            // Flatten resources from every task in the group, then append the
            // executor's resources.
            return Stream.concat(
                    operation.getOperation().get().getLaunchGroup().getTaskGroup().getTasksList().stream()
                            .flatMap(taskInfo -> taskInfo.getResourcesList().stream()),
                    operation.getOperation().get().getLaunchGroup().getExecutor().getResourcesList().stream())
                    .collect(Collectors.toList());
        }
    }
    return Collections.emptyList();
}
// Returns the path of this task's first volume resource.
// NOTE(review): assumes the task has at least one volume — get(0) throws otherwise.
public String getVolumePath() {
    return TaskUtils.getVolumePaths(
            getTaskInfo().getResourcesList())
            .get(0);
}
// Asserts that the named task exists in the state store and that all of its
// resources have been removed (e.g. after an unreserve/cleanup pass).
@Override
public void expect(ClusterState state, SchedulerDriver mockDriver) throws AssertionError {
    Optional<Protos.TaskInfo> task = new StateStore(persisterWithTasks).fetchTask(taskName);
    Assert.assertTrue(String.format("Task %s not found", taskName), task.isPresent());
    // Include the leftover resources in the failure message for easier debugging.
    Assert.assertEquals(String.format("Expected zero resources, got: %s", task.get().getResourcesList()),
            0, task.get().getResourcesCount());
}