public static String toString(TaskAttemptId taid) {
  return taid.toString();
}
private void checkRunning(long currentTime) {
  Iterator<Map.Entry<TaskAttemptId, ReportTime>> iterator =
      runningAttempts.entrySet().iterator();
  while (iterator.hasNext()) {
    Map.Entry<TaskAttemptId, ReportTime> entry = iterator.next();
    boolean taskTimedOut = (taskTimeOut > 0)
        && (currentTime > (entry.getValue().getLastProgress() + taskTimeOut));
    if (taskTimedOut) {
      // task is lost; remove it from the list and raise a lost event
      iterator.remove();
      eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(entry.getKey(),
          "AttemptID:" + entry.getKey().toString() + " Timed out after "
              + taskTimeOut / 1000 + " secs"));
      eventHandler.handle(new TaskAttemptEvent(entry.getKey(),
          TaskAttemptEventType.TA_TIMED_OUT));
    }
  }
}
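// The timeout check above only fires when an attempt's last-progress
// timestamp goes stale. A minimal sketch of the reporting side that keeps an
// attempt alive is shown below; the method name progressing() and the
// ReportTime.setLastProgress() setter are assumptions for illustration and
// may not match the real TaskHeartbeatHandler API exactly.
public void progressing(TaskAttemptId attemptID) {
  ReportTime time = runningAttempts.get(attemptID);
  if (time != null) {
    // refresh the timestamp so checkRunning() no longer sees this attempt
    // as timed out
    time.setLastProgress(clock.getTime());
  }
}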
if (LOG.isDebugEnabled()) {
  LOG.debug("Renaming map output file for task attempt "
      + mapId.toString() + " from original location "
      + mapOut.toString() + " to destination " + reduceIn.toString());
}
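// A hedged sketch of the rename that the debug message above precedes,
// assuming a local FileSystem handle named localFs; the actual surrounding
// code may obtain the filesystem and handle failures differently.
if (!localFs.rename(mapOut, reduceIn)) {
  throw new IOException("Could not rename map output " + mapOut
      + " to " + reduceIn);
}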
YarnException ye = (YarnException) ue.getCause();
if (ye.getCause() instanceof AccessControlException) {
  String taId = attemptId.toString();
  String msg = "Unauthorized attempt to kill task attempt " + taId
@SuppressWarnings("unchecked")
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt,
    TaskAttemptEvent event) {
  taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
      taskAttempt.attemptId);
  sendContainerCleanup(taskAttempt, event);
  if (taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
    // after a reduce task has succeeded, its outputs are safe in HDFS.
    // logically such a task should not be killed. we only come here when
    // there is a race condition in the event queue, e.g. some logic sends
    // a kill request to this attempt while the successful completion event
    // for this task is already in the event queue, so the kill event gets
    // executed immediately after the attempt is marked successful and
    // results in this transition being exercised.
    // ignore this for reduce tasks
    LOG.info("Ignoring killed event for successful reduce task attempt "
        + taskAttempt.getID().toString());
    return TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP;
  } else {
    // store the reschedule flag so that after cleanup is completed, a new
    // attempt is scheduled/rescheduled based on it.
    if (event instanceof TaskAttemptKillEvent) {
      taskAttempt.setRescheduleNextAttempt(
          ((TaskAttemptKillEvent) event).getRescheduleAttempt());
    }
    return TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP;
  }
}
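// A hedged sketch of the kill path that feeds the transition above: a caller
// posts a TaskAttemptKillEvent whose reschedule flag the transition stores
// for use after container cleanup. The message text and the three-argument
// constructor shown here are illustrative assumptions.
eventHandler.handle(new TaskAttemptKillEvent(taskAttempt.getID(),
    "Speculation: attempt killed because a duplicate succeeded", true));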
SystemClock.getInstance(), null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
taskAttempt.getID().toString());
return TaskAttemptStateInternal.SUCCEEDED;