/**
 * Deserializes the {@link SingularityTaskExecutorData} JSON payload embedded in the
 * task's data field.
 *
 * @param objectMapper mapper used to parse the payload bytes
 * @param taskInfo task whose data bytes hold the serialized executor data
 * @return the parsed executor data
 * @throws IllegalStateException if the task carries no data
 * @throws RuntimeException if deserialization fails; the original exception is
 *     rethrown as-is when unchecked, otherwise wrapped as the cause
 */
private SingularityTaskExecutorData readExecutorData(ObjectMapper objectMapper, Protos.TaskInfo taskInfo) {
  try {
    Preconditions.checkState(taskInfo.hasData(), "TaskInfo was missing executor data");
    return objectMapper.readValue(taskInfo.getData().toByteArray(), SingularityTaskExecutorData.class);
  } catch (Exception e) {
    // Throwables.propagate is deprecated; this is its documented replacement and
    // preserves the same behavior (unchecked exceptions pass through untouched).
    Throwables.throwIfUnchecked(e);
    throw new RuntimeException(e);
  }
}
private HamaConfiguration configure(final TaskInfo task) { Configuration conf = new Configuration(false); try { byte[] bytes = task.getData().toByteArray(); conf.readFields(new DataInputStream(new ByteArrayInputStream(bytes))); } catch (IOException e) { LOG.warn("Failed to deserialize configuraiton.", e); System.exit(1); } // Set the local directories inside the executor sandbox, so that // different Grooms on the same host do not step on each other. conf.set("bsp.local.dir", System.getProperty("user.dir") + "/bsp/local"); conf.set("bsp.tmp.dir", System.getProperty("user.dir") + "/bsp/tmp"); conf.set("bsp.disk.queue.dir", System.getProperty("user.dir") + "/bsp/diskQueue"); conf.set("hama.disk.vertices.path", System.getProperty("user.dir") + "/bsp/graph"); return new HamaConfiguration(conf); }
private JobConf configure(final TaskInfo task) { JobConf conf = new JobConf(false); try { byte[] bytes = task.getData().toByteArray(); conf.readFields(new DataInputStream(new ByteArrayInputStream(bytes))); } catch (IOException e) {
CassandraData data = CassandraData.parse(info.getData()); switch (data.getType()) { case CASSANDRA_DAEMON:
@Override public void run() { Thread.currentThread().setContextClassLoader(TaskThread.class.getClassLoader()); executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_RUNNING).build()); Map<String, Object> data = SerializationUtils.deserialize(taskInfo.getData().toByteArray()); ShardingContexts shardingContexts = (ShardingContexts) data.get("shardingContext"); @SuppressWarnings("unchecked") JobConfigurationContext jobConfig = new JobConfigurationContext((Map<String, String>) data.get("jobConfigContext")); try { ElasticJob elasticJob = getElasticJobInstance(jobConfig); final CloudJobFacade jobFacade = new CloudJobFacade(shardingContexts, jobConfig, jobEventBus); if (jobConfig.isTransient()) { JobExecutorFactory.getJobExecutor(elasticJob, jobFacade).execute(); executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_FINISHED).build()); } else { new DaemonTaskScheduler(elasticJob, jobConfig, jobFacade, executorDriver, taskInfo.getTaskId()).init(); } // CHECKSTYLE:OFF } catch (final Throwable ex) { // CHECKSTYLE:ON log.error("Elastic-Job-Cloud-Executor error", ex); executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_ERROR).setMessage(ExceptionUtil.transform(ex)).build()); executorDriver.stop(); throw ex; } }
@Override public void run() { executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_RUNNING).build()); Map<String, Object> data = SerializationUtils.deserialize(taskInfo.getData().toByteArray()); ShardingContexts shardingContexts = (ShardingContexts) data.get("shardingContext"); @SuppressWarnings("unchecked") JobConfigurationContext jobConfig = new JobConfigurationContext((Map<String, String>) data.get("jobConfigContext")); try { ElasticJob elasticJob = getElasticJobInstance(jobConfig); final CloudJobFacade jobFacade = new CloudJobFacade(shardingContexts, jobConfig, jobEventBus); if (jobConfig.isTransient()) { JobExecutorFactory.getJobExecutor(elasticJob, jobFacade).execute(); executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_FINISHED).build()); } else { new DaemonTaskScheduler(elasticJob, jobConfig, jobFacade, executorDriver, taskInfo.getTaskId()).init(); } // CHECKSTYLE:OFF } catch (final Throwable ex) { // CHECKSTYLE:ON log.error("Elastic-Job-Cloud-Executor error", ex); executorDriver.sendStatusUpdate(Protos.TaskStatus.newBuilder().setTaskId(taskInfo.getTaskId()).setState(Protos.TaskState.TASK_ERROR).setMessage(ExceptionUtil.transform(ex)).build()); executorDriver.stop(); throw ex; } }
/**
 * Parses and returns the Cassandra payload embedded in this task's info data.
 *
 * @return the decoded {@link CassandraData} for this task
 */
protected CassandraData getData() {
    return CassandraData.parse(this.info.getData());
}
/**
 * Verifies that moving a daemon records the original host's address as the
 * replace IP in the moved task's Cassandra config.
 */
@Test
public void testMoveDaemon() throws Exception {
    // Create a daemon and pin it to the test offer's host.
    CassandraDaemonTask daemon = cassandraState.createDaemon(testDaemonName).update(getTestOffer());
    Assert.assertEquals(testHostName, daemon.getHostname());

    // Move it, then decode the moved task's payload.
    CassandraDaemonTask moved = cassandraState.moveDaemon(daemon);
    CassandraData movedData = CassandraData.parse(moved.getTaskInfo().getData());

    // The replace IP must point back at the host the daemon was moved away from.
    Assert.assertEquals(daemon.getHostname(), movedData.getConfig().getReplaceIp());
}