@Override
public void commitJob(JobContext jobContext) throws IOException {
  for (String alias : outputCommitters.keySet()) {
    LOGGER.info("Calling commitJob for alias: " + alias);
    BaseOutputCommitterContainer outputContainer = outputCommitters.get(alias);
    outputContainer.getBaseCommitter().commitJob(outputContainer.getContext());
  }
}
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
  for (String alias : outputCommitters.keySet()) {
    BaseOutputCommitterContainer outputContainer = outputCommitters.get(alias);
    OutputCommitter baseCommitter = outputContainer.getBaseCommitter();
    TaskAttemptContext committerContext = outputContainer.getContext();
    if (baseCommitter.needsTaskCommit(committerContext)) {
      LOGGER.info("Calling commitTask for alias: " + alias);
      baseCommitter.commitTask(committerContext);
    }
  }
}
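The two snippets above use a BaseOutputCommitterContainer that is never shown here. A minimal sketch of such a container, assuming it does nothing more than pair a delegate committer with the context it was created against; the body below is inferred from the call sites and is hypothetical, not the original definition:

// Hypothetical container pairing a delegate committer with its own task
// context, inferred from the usage above; not the actual class definition.
public class BaseOutputCommitterContainer {
  private final OutputCommitter baseCommitter;
  private final TaskAttemptContext context;

  public BaseOutputCommitterContainer(OutputCommitter baseCommitter, TaskAttemptContext context) {
    this.baseCommitter = baseCommitter;
    this.context = context;
  }

  public OutputCommitter getBaseCommitter() {
    return baseCommitter;
  }

  // TaskAttemptContext extends JobContext, so the same context can be passed
  // to both commitTask and commitJob, as the snippets above do.
  public TaskAttemptContext getContext() {
    return context;
  }
}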
@Override
public void setupJob(JobContext jobContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.setupJob(jobContext);
  }
}
/** {@inheritDoc} */
@Override
public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
  JobContextImpl jobCtx = taskCtx.jobContext();

  try {
    OutputFormat outputFormat = getOutputFormat(jobCtx);

    OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());

    if (committer != null) {
      if (abort)
        committer.abortJob(jobCtx, JobStatus.State.FAILED);
      else
        committer.commitJob(jobCtx);
    }
  } catch (ClassNotFoundException | IOException e) {
    throw new IgniteCheckedException(e);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();

    throw new IgniteInterruptedCheckedException(e);
  }
}
}
private void publishTest(Job job) throws Exception {
  HCatOutputFormat hcof = new HCatOutputFormat();
  TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
      job.getConfiguration(), ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID());
  OutputCommitter committer = hcof.getOutputCommitter(tac);
  committer.setupJob(job);
  committer.setupTask(tac);
  committer.commitTask(tac);
  committer.commitJob(job);

  Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
  assertNotNull(part);

  StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(), part.getParameters());
  assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
  assertTrue(part.getSd().getLocation().contains("p1"));
}
try {
  committer = outFormat.getOutputCommitter(cntxt);
  committer.setupTask(cntxt);
  writer = outFormat.getRecordWriter(cntxt);
  while (recordItr.hasNext()) {
    // Write each record through the container's RecordWriter.
    writer.write(null, recordItr.next());
  }
  writer.close(cntxt);
  if (committer.needsTaskCommit(cntxt)) {
    committer.commitTask(cntxt);
  }
} catch (IOException e) {
  if (null != committer) {
    // Roll back any partial task output before surfacing the failure.
    try {
      committer.abortTask(cntxt);
    } catch (IOException e1) {
      throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
    }
  }
  throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e);
} catch (InterruptedException e) {
  if (null != committer) {
    try {
      committer.abortTask(cntxt);
    } catch (IOException e1) {
      throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
    }
  }
  throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e);
}
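The fragment above relies on state declared outside it. A hedged sketch of that surrounding setup, with the shim-based context creation borrowed from the publishTest snippet earlier; the enclosing method name and its Configuration parameter are hypothetical:

// Hedged sketch of the setup the fragment above assumes; variable names
// mirror the fragment, `writeRecords` and `conf` are hypothetical.
private void writeRecords(Configuration conf, Iterator<HCatRecord> recordItr) throws HCatException {
  HCatOutputFormat outFormat = new HCatOutputFormat();
  TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
      conf, ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID());
  OutputCommitter committer = null;
  RecordWriter<WritableComparable<?>, HCatRecord> writer = null;
  // ... the setupTask/write/commitTask try-catch fragment above goes here ...
}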
@Override
public void abortJob(JobContext jobContext, State state) throws IOException {
  for (String alias : outputCommitters.keySet()) {
    LOGGER.info("Calling abortJob for alias: " + alias);
    BaseOutputCommitterContainer outputContainer = outputCommitters.get(alias);
    outputContainer.getBaseCommitter().abortJob(outputContainer.getContext(), state);
  }
}
}
@Override
public void abortTask(org.apache.hadoop.mapred.TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.abortTask(taskContext);
  }
}
@Override
public void setupTask(org.apache.hadoop.mapred.TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.setupTask(taskContext);
  }
}
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.commitTask(taskContext);
  }
}
hof.getOutputCommitter(context).commitTask(context);
hof.getOutputCommitter(context).commitJob(context);

FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
assertEquals(htd.getFamilies().size(), families.length);
@Override
@Deprecated
public void cleanupJob(org.apache.hadoop.mapred.JobContext context) throws IOException {
  // Retained for compatibility with hadoop 0.20.x (used by old tools, such as Cascalog);
  // delegates to every wrapped committer rather than being a no-op.
  for (OutputCommitter committer : committers) {
    committer.cleanupJob(context);
  }
}
}
@Override
public Void call() throws IOException, InterruptedException {
  final OutputCommitter outputCommitter = tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
  outputCommitter.setupTask(taCtx[taskIdx]);
  final RecordWriter rw = tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
  writeOutput(rw, taCtx[taskIdx]);
  outputCommitter.commitTask(taCtx[taskIdx]);
  return null;
}
});
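The trailing "});" suggests each Callable above is submitted to some task pool, one per entry in the taCtx array. A hedged sketch of how such per-task callables might be driven to completion; the ExecutorService wiring and the tasks list are assumptions, not the original harness:

// Illustrative driver only: submits one Callable per task attempt and blocks
// until every task has set up, written, and committed its output.
ExecutorService executor = Executors.newFixedThreadPool(taCtx.length);
try {
  List<Future<Void>> futures = new ArrayList<>();
  for (Callable<Void> task : tasks) { // `tasks` is a hypothetical list of the Callables above
    futures.add(executor.submit(task));
  }
  for (Future<Void> future : futures) {
    future.get(); // rethrows any per-task failure
  }
} catch (InterruptedException | ExecutionException e) {
  throw new IOException(e);
} finally {
  executor.shutdownNow();
}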
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
  boolean result = false;
  for (OutputCommitter committer : committers) {
    result |= committer.needsTaskCommit(taskContext);
  }
  return result;
}
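Note that needsTaskCommit here ORs across delegates, so a true result only means at least one delegate has pending task output; the unconditional commitTask loops above will still invoke every delegate. A hedged sketch of a per-delegate gated variant, mirroring the alias-based gating shown near the top (illustrative, not from the original source):

// Illustrative variant: only delegates that report pending task output are
// asked to commit, matching the per-alias gating shown earlier.
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    if (committer.needsTaskCommit(taskContext)) {
      committer.commitTask(taskContext);
    }
  }
}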
@Override
public void abortJob(JobContext jobContext, State state) throws IOException {
  getBaseOutputCommitter().abortJob(HCatMapRedUtil.createJobContext(jobContext), state);
  cleanupJob(jobContext);
}
@Override
public void abortTask(TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.abortTask(taskContext);
  }
}
}
@Override
public void setupTask(TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.setupTask(taskContext);
  }
}
@Override
public void commitTask(org.apache.hadoop.mapred.TaskAttemptContext taskContext) throws IOException {
  for (OutputCommitter committer : committers) {
    committer.commitTask(taskContext);
  }
}
@Override
public void cleanupJob(JobContext context) throws IOException {
  getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));

  // Cancel HCat and JobTracker tokens
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    String tokenStrForm = client.getTokenStrForm();
    if (tokenStrForm != null
        && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
      client.cancelDelegationToken(tokenStrForm);
    }
  } catch (Exception e) {
    LOG.warn("Failed to cancel delegation token", e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }
}
}
@Override
public boolean needsTaskCommit(org.apache.hadoop.mapred.TaskAttemptContext taskContext) throws IOException {
  boolean result = false;
  for (OutputCommitter committer : committers) {
    result |= committer.needsTaskCommit(taskContext);
  }
  return result;
}