// NOTE(review): fragment cut at both edges — the enclosing method is not visible in this
// chunk, so the comments below are hedged where the code alone cannot establish intent.
// Tail of a supplier lambda (start not visible), presumably passed to a lazy logging or
// formatting call that stringifies an exception `e` — TODO confirm against the full method.
() -> String.valueOf(e));
// Synchronously abort every job belonging to cluster controller ccId before continuing;
// scheduleAndSync (vs. schedule) blocks until the abort work has run.
getWorkQueue().scheduleAndSync(new AbortAllJobsWork(this, ccId));
// Remove and (in the cut-off branch below) presumably shut down the heartbeat manager for
// this CC — TODO confirm: the body of the if is not visible here.
HeartbeatManager hbMgr = heartbeatManagers.remove(ccId);
if (hbMgr != null) {
@Override protected void doRun() throws Exception { LOGGER.info("Aborting all tasks for controller {}", ccId); IResultPartitionManager resultPartitionManager = ncs.getResultPartitionManager(); if (resultPartitionManager == null) { LOGGER.log(Level.WARN, "ResultPartitionManager is null on " + ncs.getId()); } Deque<Task> abortedTasks = new ArrayDeque<>(); Collection<Joblet> joblets = ncs.getJobletMap().values(); // TODO(mblow): should we have one jobletmap per cc? joblets.stream().filter(joblet -> joblet.getJobId().getCcId().equals(ccId)).forEach(joblet -> { joblet.getTaskMap().values().forEach(task -> { task.abort(); abortedTasks.add(task); }); final JobId jobId = joblet.getJobId(); if (resultPartitionManager != null) { resultPartitionManager.abortReader(jobId); resultPartitionManager.sweep(jobId); } ncs.getWorkQueue().schedule(new CleanupJobletWork(ncs, jobId, JobStatus.FAILURE)); }); ncs.getExecutor().submit(new EnsureAllCcTasksCompleted(ncs, ccId, abortedTasks)); } }
// NOTE(review): fragment of a task-execution completion/failure path, cut at both edges;
// the enclosing try/catch structure is not fully visible, so branch boundaries below are
// hedged — the `return;` followed by `throw operatorException;` are presumably disjoint
// branches of the full method, not sequential statements. TODO confirm against full source.
// Abort path: record TASK_ABORTED for this attempt, tag the collected exceptions with this
// node's id, and report the failure to the CC via the work queue.
exceptions.add(HyracksDataException.create(TASK_ABORTED, getTaskAttemptId()));
ExceptionUtils.setNodeIds(exceptions, ncs.getId());
ncs.getWorkQueue()
        .schedule(new NotifyTaskFailureWork(ncs, this, exceptions, joblet.getJobId(), taskAttemptId));
return;
// Rethrow of the operator's own failure (separate branch — see note above).
throw operatorException;
// Success path: notify the CC that this task completed normally.
ncs.getWorkQueue().schedule(new NotifyTaskCompleteWork(ncs, this));
} catch (Throwable e) { // NOSONAR: Catch all failures
// Any failure raised while finishing/notifying is itself reported as a task failure.
exceptions.add(HyracksDataException.create(e));
ncs.getWorkQueue()
        .schedule(new NotifyTaskFailureWork(ncs, this, exceptions, joblet.getJobId(), taskAttemptId));
// NOTE(review): fragment — enclosing method not visible. Tags the collected exceptions with
// this node's id, resolves the attempt id of the task at taskIndex, and schedules a failure
// notification to the CC. `task` vs. `taskIndex` relationship is not visible here — TODO
// confirm they refer to the same task in the full method.
ExceptionUtils.setNodeIds(exceptions, ncs.getId());
TaskAttemptId taskId = taskDescriptors.get(taskIndex).getTaskAttemptId();
ncs.getWorkQueue().schedule(new NotifyTaskFailureWork(ncs, task, exceptions, jobId, taskId));
// Dispatch of CC->NC RPC functions onto the NC work queue: each case unpacks the typed
// function payload, schedules the corresponding work item, and returns. (Fragment of the
// enclosing switch — its header and default case are outside this chunk.)
case SEND_APPLICATION_MESSAGE:
    // Forward an application-level message to the NC's message handler.
    CCNCFunctions.SendApplicationMessageFunction amf = (CCNCFunctions.SendApplicationMessageFunction) fn;
    ncs.getWorkQueue().schedule(
            new ApplicationMessageWork(ncs, amf.getMessage(), amf.getDeploymentId(), amf.getNodeId()));
    return;
case START_TASKS:
    CCNCFunctions.StartTasksFunction stf = (CCNCFunctions.StartTasksFunction) fn;
    // FIX: the original text was truncated here — the StartTasksWork argument list was
    // never closed and the case's terminating `return;` was missing, so control ran
    // straight into the ABORT_TASKS label. Reconstructed close + return restored.
    ncs.getWorkQueue()
            .schedule(new StartTasksWork(ncs, stf.getDeploymentId(), stf.getJobId(), stf.getPlanBytes(),
                    stf.getTaskDescriptors(), stf.getConnectorPolicies(), stf.getFlags()));
    return;
case ABORT_TASKS:
    // Abort a specific set of tasks of one job.
    CCNCFunctions.AbortTasksFunction atf = (CCNCFunctions.AbortTasksFunction) fn;
    ncs.getWorkQueue().schedule(new AbortTasksWork(ncs, atf.getJobId(), atf.getTasks()));
    return;
case ABORT_ALL_JOBS:
    // Abort every job owned by the given cluster controller.
    CCNCFunctions.AbortCCJobsFunction aajf = (CCNCFunctions.AbortCCJobsFunction) fn;
    ncs.getWorkQueue().schedule(new AbortAllJobsWork(ncs, aajf.getCcId()));
    return;
case CLEANUP_JOBLET:
    // Release joblet resources for a finished (or failed) job.
    CCNCFunctions.CleanupJobletFunction cjf = (CCNCFunctions.CleanupJobletFunction) fn;
    ncs.getWorkQueue().schedule(new CleanupJobletWork(ncs, cjf.getJobId(), cjf.getStatus()));
    return;
case REPORT_PARTITION_AVAILABILITY:
    // Tell waiting consumers where a newly available partition can be fetched from.
    CCNCFunctions.ReportPartitionAvailabilityFunction rpaf =
            (CCNCFunctions.ReportPartitionAvailabilityFunction) fn;
    ncs.getWorkQueue().schedule(
            new ReportPartitionAvailabilityWork(ncs, rpaf.getPartitionId(), rpaf.getNetworkAddress()));
    return;