/**
 * Deprecated job-cleanup hook retained for compatibility with hadoop 0.20.x
 * (used by old tools, such as Cascalog). Not a no-op: it fans the cleanup
 * call out to every wrapped committer.
 */
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
    for (OutputCommitter committer : committers) {
        committer.cleanupJob(context);
    }
}
}
@Override public void cleanupJob(JobContext context) throws IOException { getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context)); //Cancel HCat and JobTracker tokens IMetaStoreClient client = null; try { HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration()); client = HCatUtil.getHiveMetastoreClient(hiveConf); String tokenStrForm = client.getTokenStrForm(); if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) { client.cancelDelegationToken(tokenStrForm); } } catch (Exception e) { LOG.warn("Failed to cancel delegation token", e); } finally { HCatUtil.closeHiveClientQuietly(client); } } }
/**
 * For committing the job's output after successful job completion. Invoked only
 * for jobs whose final run state is SUCCESSFUL, from the application master
 * process for the entire job, and guaranteed to be called only once; an
 * exception thrown here fails the entire job.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException if committing the job output fails
 */
public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
}
/**
 * For aborting an unsuccessful job's output. Invoked for jobs whose final run
 * state is {@link JobStatus.State#FAILED} or {@link JobStatus.State#KILLED}.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @param state final runstate of the job
 * @throws IOException if aborting the job output fails
 */
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
    cleanupJob(jobContext);
}
/**
 * For committing the job's output after successful job completion. Invoked only
 * for jobs whose final run state is SUCCESSFUL, from the application master
 * process for the entire job, and guaranteed to be called only once; an
 * exception thrown here fails the entire job.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException if committing the job output fails
 */
public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
}
/**
 * For committing the job's output after successful job completion. Invoked only
 * for jobs whose final run state is SUCCESSFUL.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException if committing the job output fails
 */
public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
}
/**
 * Commits (and thereby cleans up) the job's output after job completion.
 * Invoked only for jobs whose final run state is
 * {@link JobStatus.State#SUCCEEDED}.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException if committing the job output fails
 */
public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
}
/**
 * For aborting an unsuccessful job's output. Invoked for jobs whose final run
 * state is {@link JobStatus.State#FAILED} or {@link JobStatus.State#KILLED}.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @param state final run state of the job, should be either
 * {@link JobStatus.State#KILLED} or {@link JobStatus.State#FAILED}
 * @throws IOException if aborting the job output fails
 */
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
    cleanupJob(jobContext);
}
/**
 * For committing the job's output after successful job completion. Invoked only
 * for jobs whose final run state is SUCCESSFUL, from the application master
 * process for the entire job, and guaranteed to be called only once; an
 * exception thrown here fails the entire job.
 *
 * <p>Delegates to {@link #cleanupJob} for backward compatibility.
 *
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException if committing the job output fails
 */
public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
}
/**
 * Deprecated job-cleanup hook retained for compatibility with hadoop 0.20.x
 * (used by old tools, such as Cascalog). Not a no-op: it fans the cleanup
 * call out to every wrapped committer.
 */
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
    for (OutputCommitter committer : committers) {
        committer.cleanupJob(context);
    }
}
}
/**
 * Deprecated job-cleanup hook; forwards directly to the wrapped base
 * committer so callers still using the old lifecycle keep working.
 */
@Override
@Deprecated
public void cleanupJob(JobContext jobContext) throws IOException {
    baseCommitter.cleanupJob(jobContext);
}
/**
 * Deprecated job-cleanup hook retained for compatibility with hadoop 0.20.x
 * (used by old tools, such as Cascalog). Not a no-op: it fans the cleanup
 * call out to every wrapped committer.
 */
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
    for (OutputCommitter committer : committers) {
        committer.cleanupJob(context);
    }
}
}
/**
 * Deprecated job-cleanup hook retained for compatibility with hadoop 0.20.x
 * (used by old tools, such as Cascalog). Not a no-op: it fans the cleanup
 * call out to every wrapped committer.
 */
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
    for (OutputCommitter committer : committers) {
        committer.cleanupJob(context);
    }
}
}
/**
 * Deprecated job-cleanup hook; forwards directly to the wrapped delegate
 * committer so callers still using the old lifecycle keep working.
 */
@Override
@Deprecated
public void cleanupJob(JobContext context) throws IOException {
    delegate.cleanupJob(context);
}
/**
 * Deprecated job-cleanup hook retained for compatibility with hadoop 0.20.x
 * (used by old tools, such as Cascalog). Not a no-op: it fans the cleanup
 * call out to every wrapped committer.
 */
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
    for (OutputCommitter committer : committers) {
        committer.cleanupJob(context);
    }
}
}
/**
 * Forwards the deprecated cleanupJob call to the wrapped committer; the
 * deprecation warning is suppressed because this override must keep calling
 * the old lifecycle method.
 */
@SuppressWarnings("deprecation")
@Override
public void cleanupJob(JobContext jobContext) throws IOException {
    committer.cleanupJob(jobContext);
}

@Override
/**
 * Cleans up the job via the base committer, then removes this committer's
 * scratch area.
 */
@Override
public void cleanupJob(JobContext context) throws IOException {
    try {
        baseOutputCommitter.cleanupJob(context);
    } finally {
        // Always clean the scratch area, even if the base committer's cleanup
        // throws; otherwise failed jobs would leak scratch space.
        cleanupScratch(context);
    }
}
/**
 * Delegates job cleanup to the wrapped committer, re-wrapping the context
 * with this wrapper's own configuration.
 */
@Override
public void cleanupJob(JobContext context) throws IOException {
    // NOTE(review): makeJobContext presumably adapts the context so the
    // delegate sees this wrapper's configuration — confirm against
    // HadoopUtils.makeJobContext.
    outputCommitter.cleanupJob(
        HadoopUtils.makeJobContext(getConf(), context));
}
/**
 * Delegates job cleanup to the wrapped committer, re-wrapping the context
 * with this wrapper's own configuration.
 */
@Override
public void cleanupJob(JobContext context) throws IOException {
    // NOTE(review): makeJobContext presumably adapts the context so the
    // delegate sees this wrapper's configuration — confirm against
    // HadoopUtils.makeJobContext.
    outputCommitter.cleanupJob(
        HadoopUtils.makeJobContext(getConf(), context));
}
/**
 * Logs the job, then forwards the deprecated cleanupJob call to the committer
 * resolved for this job's task-attempt context.
 */
@SuppressWarnings("deprecation")
@Override
public void cleanupJob(JobContext jobContext) throws IOException {
    logJob(jobContext);
    // The underlying committer is looked up per task-attempt context, so
    // convert the job context once and reuse it for both lookup and the call.
    org.apache.hadoop.mapreduce.TaskAttemptContext attemptContext =
        toTaskAttemptContext(jobContext);
    committer(attemptContext).cleanupJob(attemptContext);
}