@Override public ProcessBuilder call() throws Exception { if (task.getTaskInfo().hasContainer() && task.getTaskInfo().getContainer().hasDocker()) { executorUtils.sendStatusUpdate(task.getDriver(), task.getTaskInfo().getTaskId(), TaskState.TASK_STARTING, String.format("Pulling image... (executor pid: %s)", executorPid), task.getLog()); try { dockerUtils.pull(task.getTaskInfo().getContainer().getDocker().getImage()); } catch (DockerException e) { throw new ProcessFailedException("Could not pull docker image", e); executorUtils.sendStatusUpdate(task.getDriver(), task.getTaskInfo().getTaskId(), TaskState.TASK_STARTING, String.format("Staging files... (executor pid: %s)", executorPid), task.getLog());
+ " -J-Xms" + String.valueOf((int)igniteTask.mem()) + "m"); return Protos.TaskInfo.newBuilder() .setName("Ignite node " + taskId.getValue()) .setTaskId(taskId)
if (op.getType() == Protos.Offer.Operation.Type.LAUNCH) { for (Protos.TaskInfo info : op.getLaunch().getTaskInfosList()) { MesosWorkerStore.Worker worker = workersInNew.remove(extractResourceID(info.getTaskId())); assert (worker != null); worker = worker.launchWorker(info.getSlaveId(), msg.hostname()); workerStore.putWorker(worker); workersInLaunch.put(extractResourceID(worker.taskID()), worker);
.newBuilder() .setState(Protos.TaskState.TASK_FAILED) .setSlaveId(info.getSlaveId()) .setExecutorId(info.getExecutor().getExecutorId()) .setTaskId(info.getTaskId()) .setMessage(String.format("Task not implemented: type = %s", cassandraTask.getType())) .build();
testTaskInfo); Protos.TaskInfo taskInfo = requirement.getTaskRequirements().iterator().next().getTaskInfo(); Assert.assertEquals(taskInfo.getName(), "test-daemon"); Assert.assertTrue(taskInfo.getTaskId().getValue().contains("test-daemon")); Assert.assertEquals(taskInfo.getSlaveId().getValue(), ""); List<Protos.Resource> resources = taskInfo.getResourcesList(); Assert.assertEquals(4, resources.size());
driver, Protos.TaskState.TASK_FAILED, taskInfo.getTaskId(), taskInfo.getSlaveId(), taskInfo.getExecutor().getExecutorId(), ex.getMessage()); driver.abort();
CassandraData data = CassandraData.parse(info.getData()); switch (data.getType()) { case CASSANDRA_DAEMON:
testPrincipal); testTaskInfo = Protos.TaskInfo.newBuilder() .setTaskId(initTaskInfo.getTaskId()) .setName(initTaskInfo.getName()) .setSlaveId(initTaskInfo.getSlaveId()) .addAllResources(Arrays.asList(cpu, mem, disk, ports)) .setExecutor(initTaskInfo.getExecutor()) .build();
if (taskToAvoid.getSlaveId().equals(offer.getSlaveId())) {
if (taskToColocate.getSlaveId().equals(offer.getSlaveId())) {
private JobConf configure(final TaskInfo task) { JobConf conf = new JobConf(false); try { byte[] bytes = task.getData().toByteArray(); conf.readFields(new DataInputStream(new ByteArrayInputStream(bytes))); } catch (IOException e) {
/**
 * Verifies that updating a daemon task's config with a new CPU value produces
 * a different task, carries the new CPU setting, and keeps executor URIs
 * non-cacheable.
 */
@Test
public void testUpdateCpuConfig() {
    CassandraDaemonTask originalTask = testTaskFactory.create(
        TEST_DAEMON_NAME,
        TEST_CONFIG_NAME,
        testTaskExecutor,
        CassandraConfig.DEFAULT);
    double expectedCpu = 1.0;
    // Identical to the default config except for the CPU allocation.
    CassandraConfig cpuBumpedConfig = CassandraConfig.create(
        "2.2.5",
        expectedCpu,
        4096,
        10240,
        VolumeRequirement.VolumeType.ROOT,
        "",
        HeapConfig.DEFAULT,
        Location.DEFAULT,
        7199,
        false,
        UUID.randomUUID().toString(),
        CassandraApplicationConfig.builder().build());
    CassandraDaemonTask reconfiguredTask = originalTask.updateConfig(
        cpuBumpedConfig,
        testExecutorConfig,
        TEST_CONFIG_ID);
    // The normalized TaskInfos must differ because the CPU allocation changed.
    Assert.assertNotEquals(
        normalizeCassandraTaskInfo(originalTask),
        normalizeCassandraTaskInfo(reconfiguredTask));
    Assert.assertEquals(expectedCpu, reconfiguredTask.getConfig().getCpus(), 0.0);
    Assert.assertTrue(allUrisAreCacheable(
        reconfiguredTask.getTaskInfo().getExecutor().getCommand().getUrisList(), false));
}
assertEquals(configuration.getTaskName(), taskInfo.getName()); assertEquals(offer.getSlaveId(), taskInfo.getSlaveId()); assertEquals("elasticsearch_localhost_19700101T010203.400Z", taskInfo.getTaskId().getValue()); List<Protos.Resource> resourceList = taskInfo.getResourcesList(); assertEquals(configuration.getCpus(), getResourceByName(resourceList, "cpus").getScalar().getValue(), EPSILON); assertEquals(9300, portsList.get(1).getRanges().getRange(0).getEnd()); assertEquals(9200, taskInfo.getDiscovery().getPorts().getPorts(0).getNumber()); assertEquals(9300, taskInfo.getDiscovery().getPorts().getPorts(1).getNumber()); assertEquals(Protos.DiscoveryInfo.Visibility.EXTERNAL, taskInfo.getDiscovery().getVisibility()); assertEquals(2, taskInfo.getContainer().getVolumesCount()); assertEquals(Configuration.CONTAINER_PATH_DATA, taskInfo.getContainer().getVolumes(0).getContainerPath()); assertEquals(Configuration.DEFAULT_HOST_DATA_DIR + "/" + configuration.getElasticsearchClusterName() + "/" + offer.getSlaveId().getValue(), taskInfo.getContainer().getVolumes(0).getHostPath()); assertEquals(Protos.Volume.Mode.RW, taskInfo.getContainer().getVolumes(0).getMode()); assertEquals(Configuration.CONTAINER_PATH_CONF_YML, taskInfo.getContainer().getVolumes(1).getContainerPath()); assertEquals(Configuration.HOST_PATH_CONF, taskInfo.getContainer().getVolumes(1).getHostPath()); assertEquals(Protos.Volume.Mode.RO, taskInfo.getContainer().getVolumes(1).getMode());
@Test public void testLaunchExpectedMultiplePorts() throws Exception { // Launch for the first time: get ports 10000,10001 PodInstanceRequirement podInstanceRequirement = PodInstanceRequirementTestUtils.getPortRequirement(10000, 10001); List<Resource> reserveResources = recordLaunchWithCompleteOfferedResources( podInstanceRequirement, ResourceTestUtils.getUnreservedPorts(10000, 10001)); Assert.assertEquals(reserveResources.toString(), 5, reserveResources.size()); String resourceId0 = getResourceId(reserveResources.get(3)); String resourceId1 = getResourceId(reserveResources.get(4)); Collection<Resource> expectedResources = getExpectedExecutorResources( stateStore.fetchTasks().iterator().next().getExecutor()); expectedResources.addAll(Arrays.asList( ResourceTestUtils.getReservedPorts(10000, 10000, resourceId0), ResourceTestUtils.getReservedPorts(10001, 10001, resourceId1))); // Now try relaunch: List<OfferRecommendation> recommendations = evaluator.evaluate( PodInstanceRequirementTestUtils.getPortRequirement(10000, 10001), Arrays.asList(OfferTestUtils.getOffer(expectedResources))); Assert.assertEquals(2, recommendations.size()); // Validate LAUNCH Operation Operation launchOperation = recommendations.get(0).getOperation().get(); Assert.assertEquals(Operation.Type.LAUNCH_GROUP, launchOperation.getType()); List<Resource> launchResources = launchOperation.getLaunchGroup().getTaskGroup().getTasks(0).getResourcesList(); Assert.assertEquals(launchResources.toString(), 2, launchResources.size()); Assert.assertEquals(resourceId0, getResourceId(launchResources.get(0))); Assert.assertEquals(resourceId1, getResourceId(launchResources.get(1))); Assert.assertFalse(recommendations.get(1).getOperation().isPresent()); }
/**
 * Executes one cloud job task: reports TASK_RUNNING, runs (or schedules) the
 * job, and reports a terminal state. On any failure, reports TASK_ERROR and
 * stops the executor driver before rethrowing.
 */
@Override
public void run() {
    // Resolve job classes against this component's loader, not the caller's.
    Thread.currentThread().setContextClassLoader(TaskThread.class.getClassLoader());
    // Tell Mesos the task has started before doing any real work.
    executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_RUNNING).build());
    // Task payload carries the sharding context and the job configuration map.
    // NOTE(review): Java-native deserialization of the task data — assumes the
    // scheduler is the sole, trusted producer of this payload; confirm.
    Map<String, Object> data = SerializationUtils.deserialize(taskInfo.getData().toByteArray());
    ShardingContexts shardingContexts = (ShardingContexts) data.get("shardingContext");
    @SuppressWarnings("unchecked")
    JobConfigurationContext jobConfig = new JobConfigurationContext((Map<String, String>) data.get("jobConfigContext"));
    try {
        ElasticJob elasticJob = getElasticJobInstance(jobConfig);
        final CloudJobFacade jobFacade = new CloudJobFacade(shardingContexts, jobConfig, jobEventBus);
        if (jobConfig.isTransient()) {
            // Transient job: execute once to completion, then report FINISHED.
            JobExecutorFactory.getJobExecutor(elasticJob, jobFacade).execute();
            executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_FINISHED).build());
        } else {
            // Daemon job: hand off to a long-running scheduler; no terminal
            // status is sent here — the task stays RUNNING.
            new DaemonTaskScheduler(elasticJob, jobConfig, jobFacade, executorDriver, taskInfo.getTaskId()).init();
        }
    // CHECKSTYLE:OFF
    } catch (final Throwable ex) {
    // CHECKSTYLE:ON
        // Report the failure to Mesos with the transformed exception text,
        // stop the driver, then propagate the original throwable.
        log.error("Elastic-Job-Cloud-Executor error", ex);
        executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_ERROR).setMessage(ExceptionUtil.transform(ex)).build());
        executorDriver.stop();
        throw ex;
    }
}
testTaskInfo); Protos.TaskInfo taskInfo = requirement.getTaskRequirements().iterator().next().getTaskInfo(); Assert.assertEquals(taskInfo.getName(), "test-daemon"); Assert.assertTrue(taskInfo.getTaskId().getValue().contains("test-daemon")); Assert.assertEquals("", taskInfo.getSlaveId().getValue()); List<Protos.Resource> resources = taskInfo.getResourcesList(); Assert.assertEquals(4, resources.size());
/**
 * Executes one cloud job task: reports TASK_RUNNING, runs (or schedules) the
 * job, and reports a terminal state. On any failure, reports TASK_ERROR and
 * stops the executor driver before rethrowing.
 */
@Override
public void run() {
    // Tell Mesos the task has started before doing any real work.
    executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_RUNNING).build());
    // Task payload carries the sharding context and the job configuration map.
    // NOTE(review): Java-native deserialization of the task data — assumes the
    // scheduler is the sole, trusted producer of this payload; confirm.
    Map<String, Object> data = SerializationUtils.deserialize(taskInfo.getData().toByteArray());
    ShardingContexts shardingContexts = (ShardingContexts) data.get("shardingContext");
    @SuppressWarnings("unchecked")
    JobConfigurationContext jobConfig = new JobConfigurationContext((Map<String, String>) data.get("jobConfigContext"));
    try {
        ElasticJob elasticJob = getElasticJobInstance(jobConfig);
        final CloudJobFacade jobFacade = new CloudJobFacade(shardingContexts, jobConfig, jobEventBus);
        if (jobConfig.isTransient()) {
            // Transient job: execute once to completion, then report FINISHED.
            JobExecutorFactory.getJobExecutor(elasticJob, jobFacade).execute();
            executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_FINISHED).build());
        } else {
            // Daemon job: hand off to a long-running scheduler; no terminal
            // status is sent here — the task stays RUNNING.
            new DaemonTaskScheduler(elasticJob, jobConfig, jobFacade, executorDriver, taskInfo.getTaskId()).init();
        }
    // CHECKSTYLE:OFF
    } catch (final Throwable ex) {
    // CHECKSTYLE:ON
        // Report the failure to Mesos with the transformed exception text,
        // stop the driver, then propagate the original throwable.
        log.error("Elastic-Job-Cloud-Executor error", ex);
        executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_ERROR).setMessage(ExceptionUtil.transform(ex)).build());
        executorDriver.stop();
        throw ex;
    }
}
@Test public void testUnexpectedPermanentlyFailedResources() throws Exception { install(); StateStore stateStore = new StateStore(persister); // Pick an arbitrary task with resources: Protos.TaskInfo taskInfo = stateStore.fetchTasks().iterator().next(); Assert.assertFalse(taskInfo.getResourcesList().isEmpty()); // Verify that the task's resources are currently expected: UnexpectedResourcesResponse response = defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList())); Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result); Assert.assertTrue(response.offerResources.isEmpty()); // Mark the task as permanently failed: stateStore.storeTasks(Collections.singletonList( taskInfo.toBuilder() .setLabels(new TaskLabelWriter(taskInfo).setPermanentlyFailed().toProto()) .build())); // Verify that the task's resources are no longer expected: response = defaultScheduler.getUnexpectedResources(OfferTestUtils.getOffers(taskInfo.getResourcesList())); Assert.assertEquals(UnexpectedResourcesResponse.Result.PROCESSED, response.result); Assert.assertEquals(1, response.offerResources.size()); Assert.assertEquals(taskInfo.getResourcesList(), response.offerResources.iterator().next().getResources()); }
/**
 * Verifies that requesting an offer requirement for an existing
 * CASSANDRA_DAEMON task yields a TaskInfo with the expected name, no agent
 * assignment yet, and exactly four resources (cpus, mem, disk, ports) whose
 * values match the test configuration.
 */
@Test
public void testGetUpdateOfferRequirement() throws Exception {
    OfferRequirement requirement = provider.getNewOfferRequirement(
        CassandraTask.TYPE.CASSANDRA_DAEMON.name(),
        testTaskInfo);
    Protos.TaskInfo taskInfo = requirement.getTaskRequirements().iterator().next().getTaskInfo();
    // Fixed: JUnit's assertEquals contract is (expected, actual); the original
    // call had the arguments reversed, producing misleading failure messages.
    Assert.assertEquals("test-daemon", taskInfo.getName());
    Assert.assertTrue(taskInfo.getTaskId().getValue().contains("test-daemon"));
    // The task must not yet be bound to an agent.
    Assert.assertEquals("", taskInfo.getSlaveId().getValue());

    List<Protos.Resource> resources = taskInfo.getResourcesList();
    Assert.assertEquals(4, resources.size());

    Protos.Resource cpusResource = resources.get(0);
    Assert.assertEquals("cpus", cpusResource.getName());
    Assert.assertEquals(testCpus, cpusResource.getScalar().getValue(), 0.0);

    Protos.Resource memResource = resources.get(1);
    Assert.assertEquals("mem", memResource.getName());
    Assert.assertEquals(testMem, memResource.getScalar().getValue(), 0.0);

    Protos.Resource diskResource = resources.get(2);
    Assert.assertEquals("disk", diskResource.getName());
    Assert.assertEquals(testDisk, diskResource.getScalar().getValue(), 0.0);

    Protos.Resource portsResource = resources.get(3);
    Assert.assertEquals("ports", portsResource.getName());
    // The dynamically assigned port range must start at or above the
    // configured minimum port.
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getBegin() >= testPortBegin);
    Assert.assertTrue(portsResource.getRanges().getRangeList().get(0).getEnd() >= testPortBegin);
}
@Test public void testLaunchExpectedStaticPort() throws Exception { // Launch for the first time: get port 555 PodInstanceRequirement podInstanceRequirement = PodInstanceRequirementTestUtils.getPortRequirement(555); Resource reserveResource = recordLaunchWithCompleteOfferedResources( podInstanceRequirement, ResourceTestUtils.getUnreservedPorts(555, 555)).get(3); String resourceId = getResourceId(reserveResource); Collection<Resource> expectedResources = getExpectedExecutorResources( stateStore.fetchTasks().iterator().next().getExecutor()); expectedResources.add(ResourceTestUtils.getReservedPorts(555, 555, resourceId)); // Launch on previously reserved resources List<OfferRecommendation> recommendations = evaluator.evaluate( podInstanceRequirement, Arrays.asList(OfferTestUtils.getOffer(expectedResources))); Assert.assertEquals(2, recommendations.size()); // Validate LAUNCH Operation Protos.Offer.Operation launchOperation = recommendations.get(0).getOperation().get(); Assert.assertEquals(Operation.Type.LAUNCH_GROUP, launchOperation.getType()); Protos.Resource launchResource = launchOperation.getLaunchGroup().getTaskGroup().getTasks(0).getResources(0); Assert.assertEquals(resourceId, getResourceId(launchResource)); Assert.assertFalse(recommendations.get(1).getOperation().isPresent()); }