@Override
public void abortJob(JobContext jobContext, State state) throws IOException {
  for (String alias : outputCommitters.keySet()) {
    LOGGER.info("Calling abortJob for alias: " + alias);
    BaseOutputCommitterContainer outputContainer = outputCommitters.get(alias);
    outputContainer.getBaseCommitter().abortJob(outputContainer.getContext(), state);
  }
}
@Override
public void abortJob(JobContext jobContext, State state) throws IOException {
  getBaseOutputCommitter().abortJob(HCatMapRedUtil.createJobContext(jobContext), state);
  cleanupJob(jobContext);
}
// Reconstructed from a fragment: only the cleanup call and the finally
// block were present in the source; the enclosing signature is assumed.
@Override
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
  try {
    // Remove any partial output before delegating to the parent committer.
    cleanUpWorkingDirectory(mrJobDir, fs);
  } finally {
    super.abortJob(jobContext, state);
  }
}
/** {@inheritDoc} */
@Override
public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
  JobContextImpl jobCtx = taskCtx.jobContext();

  try {
    OutputFormat outputFormat = getOutputFormat(jobCtx);

    OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());

    if (committer != null) {
      if (abort)
        committer.abortJob(jobCtx, JobStatus.State.FAILED);
      else
        committer.commitJob(jobCtx);
    }
  }
  catch (ClassNotFoundException | IOException e) {
    throw new IgniteCheckedException(e);
  }
  catch (InterruptedException e) {
    Thread.currentThread().interrupt();

    throw new IgniteInterruptedCheckedException(e);
  }
}
// Reconstructed from a fragment: the left-hand side of the first statement
// is inferred from the mapRedJobContext usage below, and the for-loop body
// was truncated in the source.
org.apache.hadoop.mapred.JobContext mapRedJobContext = HCatMapRedUtil.createJobContext(jobContext);
if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
  getBaseOutputCommitter().abortJob(mapRedJobContext, state);
} else if (dynamicPartitioningUsed) {
  for (JobContext currContext : contextDiscoveredByPath.values()) {
    // loop body truncated in the source fragment
  }
}
@Override
public void abort(WriterContext context) throws HCatException {
  WriterContextImpl cntxtImpl = (WriterContextImpl) context;
  try {
    new HCatOutputFormat().getOutputCommitter(
        ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
            cntxtImpl.getConf(),
            ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID()))
        .abortJob(ShimLoader.getHadoopShims().getHCatShim().createJobContext(
            cntxtImpl.getConf(), null), State.FAILED);
  } catch (IOException | InterruptedException e) {
    throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
  }
}
@Override
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
  rootOutputcommitter.abortJob(jobContext, state);
  for (Map.Entry<String, OutputCommitter> committer : committers.entrySet()) {
    JobContext namedJobContext = MultipleOutputs.getNamedJobContext(jobContext, committer.getKey());
    committer.getValue().abortJob(namedJobContext, state);
  }
}
@Override
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
  Configuration conf = jobContext.getConfiguration();
  for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
    Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
    configureJob(e.getKey(), job, outputs.get(e.getKey()));
    e.getValue().abortJob(job, state);
  }
}
@Override
public void abortOutput(VertexStatus.State finalState) throws IOException {
  if (!initialized) {
    throw new RuntimeException("Committer not initialized");
  }
  JobStatus.State jobState = getJobStateFromVertexStatusState(finalState);
  committer.abortJob(jobContext, jobState);
}
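The helper getJobStateFromVertexStatusState is not shown in the snippet above. A minimal sketch of what such a mapping could look like, assuming a direct correspondence between Tez's VertexStatus.State and MapReduce's JobStatus.State, is:

// Hypothetical mapping from Tez vertex state to MapReduce job state;
// the real implementation may handle additional states. Assumes
// org.apache.tez.dag.api.client.VertexStatus and
// org.apache.hadoop.mapreduce.JobStatus are imported.
private JobStatus.State getJobStateFromVertexStatusState(VertexStatus.State state) {
  switch (state) {
    case SUCCEEDED:
      return JobStatus.State.SUCCEEDED;
    case KILLED:
      return JobStatus.State.KILLED;
    default:
      // Treat FAILED, ERROR, and anything else reaching abort as a failure.
      return JobStatus.State.FAILED;
  }
}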
@Override
public void abortJob(JobContext context, JobStatus.State state) throws IOException {
  outputCommitter.abortJob(HadoopUtils.makeJobContext(getConf(), context), state);
}
@Override
public void abortJob(JobContext jobContext, int status) throws IOException {
  logJob(jobContext);
  JobStatus.State state = convert(status);
  org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = toTaskAttemptContext(jobContext);
  committer(taskContext).abortJob(taskContext, state);
}
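convert(int) above is not shown either. A plausible sketch, relying on the fact that org.apache.hadoop.mapreduce.JobStatus.State exposes its legacy mapred integer code via getValue(), is:

// Hypothetical implementation of convert(int): match the old mapred
// integer status (RUNNING=1 ... KILLED=5) against the new-API enum.
private static JobStatus.State convert(int status) {
  for (JobStatus.State s : JobStatus.State.values()) {
    if (s.getValue() == status) {
      return s;
    }
  }
  throw new IllegalArgumentException("Unknown job status code: " + status);
}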
@Override
public void rollback() throws IOException {
  try {
    final TaskAttemptContext cleanupContext = HadoopUtils.createCleanupTaskContext(conf.get(), jobID.get());
    getOutputFormat(cleanupContext.getTaskAttemptID())
        .getOutputCommitter(cleanupContext)
        .abortJob(cleanupContext, JobStatus.State.FAILED);
  } catch (Exception e) {
    throw new IOException("Unable to rollback output", e);
  }
}
@SuppressWarnings("unchecked") protected void handleJobAbort(CommitterJobAbortEvent event) { cancelJobCommit(); try { committer.abortJob(event.getJobContext(), event.getFinalState()); } catch (Exception e) { LOG.warn("Could not abort job", e); } context.getEventHandler().handle(new JobAbortCompletedEvent( event.getJobID(), event.getFinalState())); }
@SuppressWarnings("unchecked") protected void handleJobAbort(CommitterJobAbortEvent event) { cancelJobCommit(); try { committer.abortJob(event.getJobContext(), event.getFinalState()); } catch (Exception e) { LOG.warn("Could not abort job", e); } context.getEventHandler().handle(new JobAbortCompletedEvent( event.getJobID(), event.getFinalState())); }