String jobId = t.getTaskAttemptId().getJobID().toString();
if (firstError) {
  console.printError("Examining task ID: " + taskId
      + " (and more) from job " + jobId);
  firstError = false; // only print this banner for the first failure
}
for (TaskReport task : client.getMapTaskReports(tasks[0].getTaskAttemptId().getJobID())) {
  if (task.getCurrentStatus().equals(TIPStatus.FAILED)) {
    for (String s : task.getDiagnostics()) {
      // ... process each diagnostic message of the failed map task
    }
  }
}
/**
 * Test task log path resolution without TASK_LOG_DIR set.
 *
 * @throws IOException
 */
@Test(timeout = 50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  // With no container log dir configured, the MRv2 log dir is unresolved.
  assertNull(TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
JobID jid = new JobID("job", 1); when(taid.getJobID()).thenReturn(jid); when(taid.toString()).thenReturn("JobId");
/**
 * Get the {@link JobID} this task attempt belongs to.
 * @return the JobID
 */
public JobID getJobID() {
  return taskId.getJobID();
}
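For context, a minimal usage sketch (not from the source above): Hadoop's public TaskAttemptID.forName() parses a serialized attempt string, after which getJobID() recovers the enclosing job. The demo class name JobIdFromAttempt and the sample attempt string are illustrative only.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;

// Hypothetical demo class; forName() and getJobID() are real Hadoop API.
public class JobIdFromAttempt {
  public static void main(String[] args) {
    // Parse an attempt ID in the standard "attempt_..." form.
    TaskAttemptID taid =
        TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
    JobID jid = taid.getJobID(); // yields job_200707121733_0003
    System.out.println(jid);
  }
}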
private void testMapOnlyNoOutputInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  // Not set on purpose: FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // do commit
  if (committer.needsTaskCommit(tContext)) {
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  FileUtil.fullyDelete(new File(outDir.toString()));
}
conf.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 1);
FileOutputFormat.setOutputPath(conf, outDir);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
private void testMapFileOutputCommitterInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
  RecordWriter theRecordWriter =
      theOutputFormat.getRecordWriter(null, conf, partFile, null);
  writeMapFileOutput(theRecordWriter, tContext);

  // do commit
  if (committer.needsTaskCommit(tContext)) {
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  validateMapFileOutputContent(FileSystem.get(conf), outDir);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
    FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, commitVersion);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();

// Second context simulates recovery under a (possibly different) version.
conf2.setInt(org.apache.hadoop.mapreduce.lib.output.
    FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, recoveryVersion);
JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
FileOutputCommitter committer2 = new FileOutputCommitter();
private void testCommitterInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
      theOutputFormat.getRecordWriter(null, conf, partFile, null);
  writeOutput(theRecordWriter, tContext);

  // do commit
  if (committer.needsTaskCommit(tContext)) {
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  validateContent(outDir);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
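The *Internal helpers above take the commit algorithm version as a parameter; presumably they are driven by small per-version wrappers like the following sketch (the wrapper method names are assumptions, not shown in the snippets):

// Assumed per-version entry points exercising the same scenario
// under FileOutputCommitter algorithm versions 1 and 2.
@Test
public void testCommitterV1() throws Exception {
  testCommitterInternal(1);
}

@Test
public void testCommitterV2() throws Exception {
  testCommitterInternal(2);
}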
static File getAttemptDir(TaskAttemptID taskid, boolean isCleanup) {
  String cleanupSuffix = isCleanup ? ".cleanup" : "";
  return new File(getJobDir(taskid.getJobID()), taskid + cleanupSuffix);
}

private static long prevOutLength;
protected List<TaskAttemptID> getRunningTasksForJob(JobID jobId) {
  List<TaskAttemptID> running = new ArrayList<TaskAttemptID>();
  synchronized (this) {
    for (TaskAttemptID attemptId : runningTasks.keySet()) {
      if (jobId.equals(attemptId.getJobID())) {
        running.add(attemptId);
      }
    }
  }
  return running;
}
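A hedged usage sketch for the lookup above: since the method is protected, a subclass (or same-package code) would call it with a JobID, for example one parsed via the real JobID.forName() API; the surrounding subclass method is hypothetical.

// Hypothetical subclass method; getRunningTasksForJob is protected.
void printRunningAttempts() {
  JobID jobId = JobID.forName("job_200707121733_0003");
  for (TaskAttemptID id : getRunningTasksForJob(jobId)) {
    System.out.println("running attempt: " + id);
  }
}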
@Override
public synchronized void failedReduce(TaskAttemptID taskAttemptID) {
  ++numReduceTasksFailed;
  addWaitingReduces(taskAttemptID.getJobID(), 1);
}
conf.setInt(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.
    FILEOUTPUTCOMMITTER_FAILURE_ATTEMPTS, maxAttempts);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new CommitterWithFailedThenSucceed();
/**
 * Obtain the owner of the log dir. This is
 * determined by checking the job's log directory.
 */
static String obtainLogDirOwner(TaskAttemptID taskid) throws IOException {
  Configuration conf = new Configuration();
  FileSystem raw = FileSystem.getLocal(conf).getRaw();
  Path jobLogDir = new Path(getJobDir(taskid.getJobID()).getAbsolutePath());
  FileStatus jobStat = raw.getFileStatus(jobLogDir);
  return jobStat.getOwner();
}
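A small usage sketch, assuming (as in Hadoop 1.x) that this helper lives in TaskLog and the caller sits in the same package, since the method is package-private; the attempt string is illustrative.

// Same-package caller of the package-private helper.
TaskAttemptID taid =
    TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
String owner = TaskLog.obtainLogDirOwner(taid);
System.out.println("log dir owner: " + owner);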
@Override
public synchronized void launchReduce(TaskAttemptID taskAttemptID) {
  aggregateJobStats.incNumReduceTasksLaunched();
  decWaitingReduces(taskAttemptID.getJobID(), 1);
}
/**
 * A child task had a local filesystem error. Kill the task.
 */
public synchronized void fsError(TaskAttemptID taskId, String message)
    throws IOException {
  ensureAuthorizedJVM(taskId.getJobID());
  internalFsError(taskId, message);
}