// NOTE(review): incomplete fragment — braces are unbalanced and `status` appears
// declared twice, so this looks like two separate loop bodies collapsed onto one
// line: (a) tracking the earliest map start time and counting finished mappers,
// and (b) tracking the latest reduce finish time and counting finished reducers.
// A task counts as "finished" when its TIPStatus is neither PENDING nor RUNNING
// (i.e. COMPLETE, KILLED, or FAILED). Restore the original line structure before
// editing further.
if (report.getStartTime() < jobStartTime || jobStartTime == 0L) { jobStartTime = report.getStartTime(); TIPStatus status = report.getCurrentStatus(); if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) { finishedMappersCount++; if (jobLastUpdateTime < report.getFinishTime()) { jobLastUpdateTime = report.getFinishTime(); TIPStatus status = report.getCurrentStatus(); if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) { finishedReducersCount++;
// NOTE(review): incomplete fragment (closing braces missing). For every map task
// of the first task's job, prints each diagnostic string of tasks whose current
// status is FAILED to stderr, prefixed with "task error: ".
if (tasks.length > 0) { for (TaskReport task : client.getMapTaskReports(tasks[0].getTaskAttemptId().getJobID())) { if (task.getCurrentStatus().equals(TIPStatus.FAILED)) { for (String s : task.getDiagnostics()) { System.err.println("task error: " + s);
/**
 * Two reports are equal only when the other object is exactly a
 * {@code TaskReport} (subclasses are deliberately excluded via a class
 * comparison rather than {@code instanceof}) and every reported field —
 * counters, diagnostics, finish/start times, progress, state, and task id —
 * matches. Diagnostics arrays are compared through their
 * {@link Arrays#toString} renderings.
 */
@Override
public boolean equals(Object o) {
  // Guard clauses: null and non-TaskReport objects are never equal.
  if (o == null) {
    return false;
  }
  if (!o.getClass().equals(TaskReport.class)) {
    return false;
  }
  TaskReport other = (TaskReport) o;
  // Field-by-field comparison; evaluation order matches the original chain.
  return counters.contentEquals(other.getCounters())
      && Arrays.toString(this.diagnostics)
          .equals(Arrays.toString(other.getDiagnostics()))
      && this.finishTime == other.getFinishTime()
      && this.progress == other.getProgress()
      && this.startTime == other.getStartTime()
      && this.state.equals(other.getState())
      && this.taskid.equals(other.getTaskID());
}
/**
 * Converts raw Hadoop {@code TaskReport}s into {@code MRTaskInfo} records,
 * carrying over task id, state, start/finish times, progress, and the
 * {@code TaskCounter} group of each report's counters (via {@code groupToMap}).
 */
private List<MRTaskInfo> toMRTaskInfos(TaskReport[] taskReports) {
  List<MRTaskInfo> result = Lists.newArrayList();
  for (TaskReport report : taskReports) {
    // Only the TaskCounter group is propagated into the info object.
    result.add(new MRTaskInfo(
        report.getTaskId(),
        report.getState(),
        report.getStartTime(),
        report.getFinishTime(),
        report.getProgress(),
        groupToMap(report.getCounters().getGroup(TaskCounter.class.getName()))));
  }
  return result;
}
// NOTE(review): fragment of a statistics loop over task reports. `wait` is the
// delay between some recorded "hadoop_start" timestamp and the task's start;
// min/max/mean accumulate wait-time statistics, while runTime (finish - start)
// feeds totalTaskTime and minRun. Presumably maxRun/meanRun updates follow
// beyond this fragment — confirm against the full loop body.
long wait = t.getStartTime() - timingMap.get("hadoop_start"); min = wait < min ? wait : min; max = wait > max ? wait : max; mean += wait; long runTime = t.getFinishTime() - t.getStartTime(); totalTaskTime += runTime; minRun = runTime < minRun ? runTime : minRun;
// NOTE(review): heavily collapsed fragment scanning setup/map/reduce/cleanup
// task reports to classify task ids by phase and to compute phase boundary
// timestamps (setupStart, minStart, maxFinish, cleanupFinish) plus map/reduce
// duration statistics. The `== 0` guards look inverted — taking Math.min/Math.max
// of a timestamp only when it IS zero would pin the boundaries at 0; the intent
// was presumably `!= 0` (skip tasks that never started/finished). Cannot be
// confirmed from this collapsed view — verify against the original file before
// relying on these aggregates.
for (TaskReport report : jobClient.getSetupTaskReports(oldJobId)) taskIdToType.put(report.getTaskID().toString(),"SETUP"); if (report.getStartTime() == 0) setupStart = Math.min(setupStart, report.getStartTime()); for (TaskReport report : mapReports) taskIdToType.put(report.getTaskID().toString(),"MAP"); if (report.getFinishTime() == 0 || report.getStartTime() == 0) minStart = Math.min(minStart, report.getStartTime()); mapStats.addValue(report.getFinishTime() - report.getStartTime()); for (TaskReport report : reduceReports) taskIdToType.put(report.getTaskID().toString(),"REDUCE"); if (report.getFinishTime() == 0 || report.getStartTime() == 0) maxFinish = Math.max(maxFinish, report.getFinishTime()); reduceStats.addValue(report.getFinishTime() - report.getStartTime()); taskIdToType.put(report.getTaskID().toString(),"CLEANUP"); if (report.getFinishTime() == 0) cleanupFinish = Math.max(cleanupFinish, report.getFinishTime());
/**
 * Creates a "status report" for this task. The report carries the task ID,
 * overall progress/state, the concatenated diagnostic messages of every
 * task-attempt that has ever been started, and the execution start/finish
 * times plus counters.
 */
synchronized TaskReport generateSingleReport() {
  // Flatten the per-attempt diagnostic lists into one combined list.
  ArrayList<String> allDiagnostics = new ArrayList<String>();
  for (List<String> perAttempt : taskDiagnosticData.values()) {
    allDiagnostics.addAll(perAttempt);
  }
  return new TaskReport(
      getTIPId(), (float) progress, state,
      allDiagnostics.toArray(new String[allDiagnostics.size()]),
      execStartTime, execFinishTime, counters);
}
// NOTE(review): documentation-style usage example, not compilable code
// (`Job job = ...;` is a placeholder). Demonstrates waiting for a job, then
// printing each map task's wall-clock duration (finish - start) in millis.
Job job = ...; job.waitForCompletion(); TaskReport[] reports = job.getTaskReports(TaskType.MAP); for(TaskReport report : reports) { long time = report.getFinishTime() - report.getStartTime(); System.out.println(report.getTaskId() + " took " + time + " millis!"); }
/**
 * Verifies that the deprecated string accessor {@code getTaskId()} and the
 * typed accessor {@code getTaskID()} of the old-API {@code TaskReport} both
 * render the same canonical task-id string for a downgraded new-API TaskID.
 */
@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
          State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
          new org.apache.hadoop.mapred.Counters());
  // Fix: JUnit's assertEquals takes (expected, actual) — the original passed
  // the actual value first, which produces misleading failure messages.
  Assert.assertEquals("task_1014873536921_0006_m_000000", treport.getTaskId());
  Assert.assertEquals("task_1014873536921_0006_m_000000",
      treport.getTaskID().toString());
}
}
// NOTE(review): incomplete fragment (closing braces missing). Builds a
// TaskReport including the current TIPStatus, then attaches attempt details:
// all active attempt ids when RUNNING, or the single successful attempt id
// when COMPLETE.
TaskReport report = new TaskReport (getTIPId(), (float)progress, state, diagnostics.toArray(new String[diagnostics.size()]), currentStatus, execStartTime, execFinishTime, counters); if (currentStatus == TIPStatus.RUNNING) { report.setRunningTaskAttempts(activeTasks.keySet()); } else if (currentStatus == TIPStatus.COMPLETE) { report.setSuccessfulAttempt(getSuccessfulTaskid());
/**
 * Fetches the task reports of the given type for a job.
 *
 * @param jobId job to look up via the cluster
 * @param type  which task type's reports to return
 * @return the downgraded (old-API) reports, or {@code EMPTY_TASK_REPORTS}
 *         when the job is unknown
 * @throws IOException on lookup failure, including interruption (in which
 *         case the thread's interrupt status is restored)
 */
private TaskReport[] getTaskReports(final JobID jobId, TaskType type)
    throws IOException {
  try {
    Job j = getJobUsingCluster(jobId);
    if (j == null) {
      // Unknown job: report "no tasks" rather than failing the caller.
      return EMPTY_TASK_REPORTS;
    }
    return TaskReport.downgradeArray(j.getTaskReports(type));
  } catch (InterruptedException ie) {
    // Fix: restore the interrupt status before translating to IOException,
    // so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
// NOTE(review): incomplete fragment (closing braces missing). Collects the
// attempt ids of all currently RUNNING map tasks, then does the same for
// reduce tasks, accumulating everything into `runningTasks`.
TaskReport mapReports[] = jc.getMapTaskReports(rJob.getID()); for (TaskReport mapReport : mapReports) { if (mapReport.getCurrentStatus() == TIPStatus.RUNNING) { runningTasks.addAll(mapReport.getRunningTaskAttempts()); TaskReport reduceReports[] = jc.getReduceTaskReports(rJob.getID()); for (TaskReport reduceReport : reduceReports) { if (reduceReport.getCurrentStatus() == TIPStatus.RUNNING) { runningTasks.addAll(reduceReport.getRunningTaskAttempts());
/**
 * Computes the average progress of up to {@code tasksPerBar} reports
 * starting at {@code index} (one display "bar").
 *
 * @param tasksPerBar maximum number of reports averaged per bar
 * @param index       starting offset into {@code reports}
 * @param reports     task reports to average over
 * @return mean progress of the reports in range, or 0 when none fall in range
 */
private float getMapAvarageProgress(int tasksPerBar, int index,
    TaskReport[] reports) {
  float progress = 0f;
  int counted = 0;
  for (; counted < tasksPerBar && index + counted < reports.length; counted++) {
    progress += reports[index + counted].getProgress();
  }
  // Fix: when no report falls in range (index past the end, or
  // tasksPerBar <= 0) the original divided 0f by 0, producing NaN.
  return counted == 0 ? 0f : progress / counted;
}
/**
 * Downgrades an array of new-API ({@code org.apache.hadoop.mapreduce})
 * TaskReports into old-API ones, element by element, preserving order.
 */
static TaskReport[] downgradeArray(org.apache.hadoop.
    mapreduce.TaskReport[] reports) {
  // Pre-size the result: output length always equals input length.
  TaskReport[] downgraded = new TaskReport[reports.length];
  for (int i = 0; i < reports.length; i++) {
    downgraded[i] = downgrade(reports[i]);
  }
  return downgraded;
}
/**
 * Prints this report's attempt ids to stdout: the single successful attempt
 * when the task is COMPLETE, or every running attempt when it is RUNNING.
 * Other statuses print nothing.
 */
private void printTaskAttempts(TaskReport report) {
  TIPStatus status = report.getCurrentStatus();
  if (status == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttempt());
  } else if (status == TIPStatus.RUNNING) {
    for (TaskAttemptID attempt : report.getRunningTaskAttempts()) {
      System.out.println(attempt);
    }
  }
}
/**
// NOTE(review): incomplete fragment (closing braces missing). Records the
// report's overall progress against each of its running attempt ids —
// every attempt of a task shares the task-level progress value.
Collection<TaskAttemptID> running = report.getRunningTaskAttempts(); for(TaskAttemptID attempt: running){ taskAttemptToProgress.put(attempt, report.getProgress());
private TaskID getTIPId(MiniMRCluster cluster, org.apache.hadoop.mapreduce.JobID jobid) throws Exception { JobClient client = new JobClient(cluster.createJobConf()); JobID jobId = JobID.downgrade(jobid); TaskReport[] mapReports = null; TaskID tipId = null; do { // make sure that the map task is running Thread.sleep(200); mapReports = client.getMapTaskReports(jobId); } while (mapReports.length == 0); for (TaskReport r : mapReports) { tipId = r.getTaskID(); break;// because we have only one map } return tipId; }
// NOTE(review): incomplete fragment — the line starts mid-expression
// (a call chain collecting map task reports) and ends mid-loop. Scans each
// report's diagnostic strings against `taskOverLimitPattern`, presumably to
// detect memory-over-limit task kills — confirm against the enclosing method.
.getMapTaskReports(JobID.downgrade(job.getJobID())))); for (TaskReport tr : allTaskReports) { String[] diag = tr.getDiagnostics(); for (String str : diag) { mat = taskOverLimitPattern.matcher(str);
// NOTE(review): incomplete fragment — the boolean expression continues past
// this line. Matches a user-supplied state-filter string ("pending",
// "running", ...) against the report's TIPStatus.
TIPStatus status = report.getCurrentStatus(); if ((state.equals("pending") && status ==TIPStatus.PENDING) || (state.equals("running") && status ==TIPStatus.RUNNING) ||
@SuppressWarnings("deprecation") private long getTaskCounterUsage (JobClient client, JobID id, int numReports, int taskId, TaskType type) throws Exception { TaskReport[] reports = null; if (TaskType.MAP.equals(type)) { reports = client.getMapTaskReports(id); } else if (TaskType.REDUCE.equals(type)) { reports = client.getReduceTaskReports(id); } assertNotNull("No reports found for task type '" + type.name() + "' in job " + id, reports); // make sure that the total number of reports match the expected assertEquals("Mismatch in task id", numReports, reports.length); Counters counters = reports[taskId].getCounters(); return counters.getCounter(TaskCounter.COMMITTED_HEAP_BYTES); }