/**
 * Creates one map task per split and registers each with the job.
 *
 * @param job         the job being populated with map tasks
 * @param inputLength total input size in bytes (used for logging only)
 * @param splits      split meta info, one entry per map task
 */
private void createMapTasks(JobImpl job, long inputLength,
    TaskSplitMetaInfo[] splits) {
  for (int idx = 0; idx < job.numMapTasks; idx++) {
    TaskImpl mapTask = new MapTaskImpl(job.jobId, idx,
        job.eventHandler,
        job.remoteJobConfFile,
        job.conf, splits[idx],
        job.taskAttemptListener,
        job.jobToken, job.jobCredentials, job.clock,
        job.applicationAttemptId.getAttemptId(),
        job.metrics, job.appContext);
    job.addTask(mapTask);
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
/**
 * Improvement: if all reducers have finished, we check if we have
 * restarted mappers that are still running. This can happen in a
 * situation when a node becomes UNHEALTHY and mappers are rescheduled.
 * See MAPREDUCE-6870 for details.
 */
private void checkReadyForCompletionWhenAllReducersDone(JobImpl job) {
  if (!job.finishJobWhenReducersDone) {
    return;
  }
  int totalReduces = job.getTotalReduces();
  int completedReduces = job.getCompletedReduces();
  if (totalReduces <= 0 || totalReduces != completedReduces
      || job.completingJob) {
    return;
  }
  // Every reducer is done; any map task still running (e.g. one
  // rescheduled after a node went UNHEALTHY) is no longer needed.
  for (TaskId mapTaskId : job.mapTasks) {
    MapTaskImpl mapTask = (MapTaskImpl) job.tasks.get(mapTaskId);
    if (!mapTask.isFinished()) {
      LOG.info("Killing map task " + mapTask.getID());
      job.eventHandler.handle(
          new TaskEvent(mapTask.getID(), TaskEventType.T_KILL));
    }
  }
  job.completingJob = true;
}
}
/**
 * @return a String formatted as a comma-separated list of split
 *         locations, or the empty string when no locations exist.
 */
@Override
protected String getSplitsAsString() {
  String[] splits = getTaskSplitMetaInfo().getLocations();
  if (splits == null || splits.length == 0) {
    return "";
  }
  // String.join replaces the hand-rolled StringBuilder loop with the
  // equivalent standard-library call.
  return String.join(",", splits);
}
}
mockEventHandler); TaskId taskId = recoverMapTask.getID(); JobID jobID = new JobID(Long.toString(clusterTimestamp), 1); TaskID taskID = new TaskID(jobID, when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts); recoverMapTask.handle( new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
/**
 * Builds the next attempt for this map task, wiring it to the task's
 * split meta info and the shared job-level services.
 */
@Override
protected TaskAttemptImpl createAttempt() {
  TaskAttemptImpl attempt = new MapTaskAttemptImpl(getID(),
      nextAttemptNumber, eventHandler, jobFile, partition,
      taskSplitMetaInfo, conf, taskAttemptListener, jobToken,
      credentials, clock, appContext);
  return attempt;
}
long expectedMapLaunches, long expectedFailedMaps) { assertEquals("Final State of Task", finalState, checkTask.getState()); checkTask.getAttempts(); assertEquals("Expected Number of Task Attempts", finalAttemptStates.size(), recoveredAttempts.size());
mockEventHandler); TaskId taskId = recoverMapTask.getID(); JobID jobID = new JobID(Long.toString(clusterTimestamp), 1); TaskID taskID = new TaskID(jobID, when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts); recoverMapTask.handle( new TaskRecoverEvent(taskId, mockTaskInfo,mockCommitter, true));
/**
 * Builds the next attempt for this map task, wiring it to the task's
 * split meta info and the shared job-level services.
 */
@Override
protected TaskAttemptImpl createAttempt() {
  TaskAttemptImpl nextAttempt = new MapTaskAttemptImpl(getID(),
      nextAttemptNumber, eventHandler, jobFile, partition,
      taskSplitMetaInfo, conf, taskAttemptListener, jobToken,
      credentials, clock, appContext);
  return nextAttempt;
}
mockEventHandler); TaskId taskId = recoverMapTask.getID(); JobID jobID = new JobID(Long.toString(clusterTimestamp), 1); TaskID taskID = new TaskID(jobID, when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts); recoverMapTask.handle( new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
/**
 * Instantiates a MapTaskImpl for each input split and adds it to the
 * job, then logs the overall input size and split count.
 *
 * @param job         the job being populated with map tasks
 * @param inputLength total input size in bytes (logged only)
 * @param splits      split meta info, one entry per map task
 */
private void createMapTasks(JobImpl job, long inputLength,
    TaskSplitMetaInfo[] splits) {
  for (int mapIndex = 0; mapIndex < job.numMapTasks; mapIndex++) {
    TaskImpl newTask = new MapTaskImpl(job.jobId, mapIndex,
        job.eventHandler,
        job.remoteJobConfFile,
        job.conf, splits[mapIndex],
        job.taskAttemptListener,
        job.jobToken, job.jobCredentials, job.clock,
        job.applicationAttemptId.getAttemptId(),
        job.metrics, job.appContext);
    job.addTask(newTask);
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
/**
 * Constructs a new MapTaskAttemptImpl for this task using the current
 * attempt number and the job-wide collaborators held by the task.
 */
@Override
protected TaskAttemptImpl createAttempt() {
  return new MapTaskAttemptImpl(
      getID(), nextAttemptNumber, eventHandler, jobFile, partition,
      taskSplitMetaInfo, conf, taskAttemptListener, jobToken,
      credentials, clock, appContext);
}
/**
 * @return a String formatted as a comma-separated list of split
 *         locations, or the empty string when no locations exist.
 */
@Override
protected String getSplitsAsString() {
  String[] splits = getTaskSplitMetaInfo().getLocations();
  if (splits == null || splits.length == 0) {
    return "";
  }
  // String.join replaces the hand-rolled StringBuilder loop with the
  // equivalent standard-library call.
  return String.join(",", splits);
}
}
mockEventHandler); TaskId taskId = recoverMapTask.getID(); JobID jobID = new JobID(Long.toString(clusterTimestamp), 1); TaskID taskID = new TaskID(jobID, when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts); recoverMapTask.handle( new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
/**
 * Registers one MapTaskImpl with the job for every split, then logs
 * the input size and the number of splits created.
 *
 * @param job         the job being populated with map tasks
 * @param inputLength total input size in bytes (logged only)
 * @param splits      split meta info, one entry per map task
 */
private void createMapTasks(JobImpl job, long inputLength,
    TaskSplitMetaInfo[] splits) {
  for (int i = 0; i < job.numMapTasks; i++) {
    job.addTask(new MapTaskImpl(job.jobId, i,
        job.eventHandler,
        job.remoteJobConfFile,
        job.conf, splits[i],
        job.taskAttemptListener,
        job.jobToken, job.jobCredentials, job.clock,
        job.applicationAttemptId.getAttemptId(),
        job.metrics, job.appContext));
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
/** * @return a String formatted as a comma-separated list of splits. */ @Override protected String getSplitsAsString() { String[] splits = getTaskSplitMetaInfo().getLocations(); if (splits == null || splits.length == 0) return ""; StringBuilder sb = new StringBuilder(); for (int i = 0; i < splits.length; i++) { if (i != 0) sb.append(","); sb.append(splits[i]); } return sb.toString(); } }
mockEventHandler); TaskId taskId = recoverMapTask.getID(); JobID jobID = new JobID(Long.toString(clusterTimestamp), 1); TaskID taskID = new TaskID(jobID, when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts); recoverMapTask.handle( new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
/**
 * Builds a MapTaskImpl wired to mock collaborators, suitable for
 * recovery tests.
 *
 * @param clusterTimestamp cluster timestamp used to build the app id
 * @param eh               event handler the task should post events to
 * @return a map task backed by mocks
 */
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {
  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = SystemClock.getInstance();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);
  // NOTE(review): a mocked Resource "minContainerRequirements" (with a
  // stubbed getMemorySize()) was previously created here but never
  // passed to anything; removed as an unused local.
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);
  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);

  MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions, eh,
      remoteJobConfFile, conf, taskSplitMetaInfo, taskAttemptListener,
      jobToken, credentials, clock, appAttemptId, metrics, appContext);
  return mapTask;
}