+ "status=" + JobStatus.getJobRunState(rj.getJobState()) + ")");
if (initializing && rj.getJobState() == JobStatus.PREP) {
if (initializing && rj.getJobState() == JobStatus.PREP) {
private Integer getJobStateSafe() throws IOException { try { return runningJob.getJobState(); } catch( NullPointerException exception ) // this happens { return null; } }
private Integer getJobStateSafe() throws IOException { try { return runningJob.getJobState(); } catch( NullPointerException exception ) // this happens { return null; } }
public boolean isJobPreparing(RunningJob job) throws IOException { return job.getJobState() == JobStatus.PREP; } /**
/**
 * Handles an unsuccessful (failed or killed) job: logs the failure, removes
 * the job's work directory, and maps the terminal state to an exit code.
 *
 * @param job        the job definition (used for its name in log messages)
 * @param runningJob the submitted job whose terminal state is inspected
 * @param fs         filesystem holding the work directory
 * @param workDir    working directory to delete
 * @return -2 if the job was killed, -3 for any other unsuccessful state
 * @throws IOException if querying the job state or deleting the directory fails
 */
protected int jobFailed(Job job, RunningJob runningJob, FileSystem fs, Path workDir)
    throws IOException {
  log.error("Map Reduce job " + job.getJobName() + " was unsuccessful. Check the logs.");
  log.error("Since job was not successful, deleting work directory: " + workDir);
  boolean deleted = fs.delete(workDir, true);
  if (!deleted) {
    log.error("Unable to remove job working directory: " + workDir);
  }
  // Fetch the state once: the original called getJobState() twice, so the
  // branch taken and the state logged could disagree if the job transitioned
  // between the two RPC calls.
  int state = runningJob.getJobState();
  if (state == JobStatus.KILLED) {
    log.warn("Job was killed");
    return -2;
  } else {
    log.error("Job failed with a jobstate of " + state);
    return -3;
  }
}
/**
 * Submits the given job as the supplied user and asserts that it completes
 * in the SUCCEEDED state.
 */
void runJobAsUser(final JobConf job, UserGroupInformation ugi) throws Exception {
  PrivilegedExceptionAction<RunningJob> submitAction =
      new PrivilegedExceptionAction<RunningJob>() {
        public RunningJob run() throws IOException {
          return JobClient.runJob(job);
        }
      };
  RunningJob submitted = ugi.doAs(submitAction);
  submitted.waitForCompletion();
  assertEquals("SUCCEEDED", JobStatus.getJobRunState(submitted.getJobState()));
}
/**
 * Verifies that a job whose output committer fails during commit ends up in
 * the FAILED state. The job is limited to a single map attempt so it fails
 * fast.
 */
public void testCommitFail() throws IOException {
  final Path input = new Path(rootDir, "./input");
  final Path output = new Path(rootDir, "./output");
  JobConf conf = createJobConf();
  conf.setMaxMapAttempts(1);
  conf.setOutputCommitter(CommitterWithCommitFail.class);
  RunningJob submitted = UtilsForTests.runJob(conf, input, output, 1, 0);
  submitted.waitForCompletion();
  assertEquals(JobStatus.FAILED, submitted.getJobState());
}
private void testKilledJob(JobConf job, MyListener myListener) throws IOException { LOG.info("Testing job-kill"); Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerkilljob/input"); Path outDir = new Path(TEST_ROOT_DIR + "/jiplistenerkilljob/output"); job.setNumMapTasks(1); job.setNumReduceTasks(0); // submit and kill the job RunningJob rJob = UtilsForTests.runJobKill(job, inDir, outDir); JobID id = rJob.getID(); // check if the job failure was notified assertFalse("Missing event notification on killing a running job", myListener.contains(id)); // check if killed assertEquals("Job failed!", JobStatus.KILLED, rJob.getJobState()); }
private void testFailCommitter(Class<? extends OutputCommitter> theClass, JobConf jobConf) throws IOException { jobConf.setOutputCommitter(theClass); RunningJob job = UtilsForTests.runJob(jobConf, inDir, outDir); // wait for the job to finish. job.waitForCompletion(); assertEquals(JobStatus.FAILED, job.getJobState()); }
private void testFailedJob(JobConf job, MyListener myListener) throws IOException { LOG.info("Testing job-fail"); Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerfailjob/input"); Path outDir = new Path(TEST_ROOT_DIR + "/jiplistenerfailjob/output"); job.setNumMapTasks(1); job.setNumReduceTasks(0); job.setMaxMapAttempts(1); // submit a job that fails RunningJob rJob = UtilsForTests.runJobFail(job, inDir, outDir); JobID id = rJob.getID(); // check if the job failure was notified assertFalse("Missing event notification on failing a running job", myListener.contains(id)); // check if failed assertEquals("Job failed!", JobStatus.FAILED, rJob.getJobState()); }
/** * Runs a job that will succeed and verifies if the subprocesses of succeeded * map task are killed properly or not. */ private static void runSuccessfulJobAndValidate(JobTracker jt, JobConf conf) throws IOException { conf.setJobName("testsucceedjobsubprocesses"); conf.setMapperClass(MapperWithChildren.class); RunningJob job = runJobAndSetProcessHandle(jt, conf); signalTask(signalFile.toString(), conf); validateKillingSubprocesses(job, conf); // Checking the Job status assertEquals(job.getJobState(), JobStatus.SUCCEEDED); }
/** * Runs a job, kills the job and verifies if the map task and its * subprocesses are also killed properly or not. */ private static void runKillingJobAndValidate(JobTracker jt, JobConf conf) throws IOException { conf.setJobName("testkilljobsubprocesses"); conf.setMapperClass(KillingMapperWithChildren.class); RunningJob job = runJobAndSetProcessHandle(jt, conf); // kill the job now job.killJob(); while (job.cleanupProgress() == 0.0f) { try { Thread.sleep(100); } catch (InterruptedException ie) { LOG.warn("sleep is interrupted:" + ie); break; } } validateKillingSubprocesses(job, conf); // Checking the Job status assertEquals(job.getJobState(), JobStatus.KILLED); }
/**
 * Submits the given job as the supplied user via a doAs block and asserts
 * the job completes in the SUCCEEDED state.
 */
void runJobAsUser(final JobConf job, UserGroupInformation ugi) throws Exception {
  RunningJob completed = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
    public RunningJob run() throws IOException {
      return JobClient.runJob(job);
    }
  });
  completed.waitForCompletion();
  Assert.assertEquals("SUCCEEDED", JobStatus.getJobRunState(completed.getJobState()));
}
/** * Runs a job that will fail and verifies if the subprocesses of failed map * task are killed properly or not. */ private static void runFailingJobAndValidate(JobTracker jt, JobConf conf) throws IOException { conf.setJobName("testfailjobsubprocesses"); conf.setMapperClass(FailingMapperWithChildren.class); // We don't want to run the failing map task 4 times. So we run it once and // check if all the subprocesses are killed properly. conf.setMaxMapAttempts(1); RunningJob job = runJobAndSetProcessHandle(jt, conf); signalTask(signalFile.toString(), conf); validateKillingSubprocesses(job, conf); // Checking the Job status assertEquals(job.getJobState(), JobStatus.FAILED); }
/**
 * Ensures a job fails when its output committer throws during commit.
 * Map attempts are capped at one so the failure is immediate.
 */
public void testCommitFail() throws IOException {
  final Path inDir = new Path(rootDir, "./input");
  final Path outDir = new Path(rootDir, "./output");

  JobConf jobConf = createJobConf();
  jobConf.setMaxMapAttempts(1);
  jobConf.setOutputCommitter(CommitterWithCommitFail.class);

  RunningJob runningJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
  runningJob.waitForCompletion();

  assertEquals(JobStatus.FAILED, runningJob.getJobState());
}
/**
 * Submits a sleep-mapper/reducer MR job, records its job id to a file, waits
 * for completion, and exits the JVM with -1 unless the job SUCCEEDED.
 *
 * @param args CLI arguments; args[1] is the input path, args[2] the output path
 * @throws IOException          on filesystem or job-client failures
 * @throws InterruptedException if monitoring the job is interrupted
 */
private static void executeJavaMapReduce(String[] args)
    throws IOException, InterruptedException {
  JobConf jConf = createSleepMapperReducerJobConf();
  final Path input = new Path(args[1]);
  FileInputFormat.setInputPaths(jConf, input);
  FileOutputFormat.setOutputPath(jConf, new Path(args[2]));
  writeToFile(input, jConf, "dummy\n", "data.txt");
  JobClient jc = new JobClient(jConf);
  System.out.println("Submitting MR job");
  RunningJob job = jc.submitJob(jConf);
  System.out.println("Submitted job " + job.getID().toString());
  writeToFile(input, jConf, job.getID().toString(), JOB_ID_FILE_NAME);
  job.waitForCompletion();
  jc.monitorAndPrintJob(jConf, job);
  if (job.getJobState() != JobStatus.SUCCEEDED) {
    // The original message lacked a trailing space, printing the state value
    // run together with "instead of" (e.g. "3 job state instead of2").
    System.err.println(job.getJobState() + " job state instead of "
        + JobStatus.SUCCEEDED);
    System.exit(-1);
  }
}
private void validateJob(RunningJob job, JobTracker jt) throws IOException { assertEquals(JobStatus.SUCCEEDED, job.getJobState()); JobID jobId = job.getID(); // construct the task id of first map task // this should not be cleanup attempt since the first attempt // fails with an exception TaskAttemptID attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 0); TaskInProgress tip = jt.getTip(attemptId.getTaskID()); TaskStatus ts = jt.getTaskStatus(attemptId); validateAttempt(tip, attemptId, ts, false, jt); attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 1); // this should be cleanup attempt since the second attempt fails // with System.exit ts = jt.getTaskStatus(attemptId); validateAttempt(tip, attemptId, ts, true, jt); attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 2); // this should be cleanup attempt since the third attempt fails // with Error ts = jt.getTaskStatus(attemptId); validateAttempt(tip, attemptId, ts, true, jt); }
private void validateJob(RunningJob job, MiniMRCluster mr) throws IOException { assertEquals(JobStatus.SUCCEEDED, job.getJobState()); long uses = job.getCounters().findCounter("jvm", "use").getValue(); assertTrue("maps = " + numMappers + ", jvms = " + uses, numMappers < uses); JobID jobId = job.getID(); for (int i = 0; i < numMappers; i++) { TaskAttemptID attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, i), 0); TaskInProgress tip = mr.getJobTrackerRunner().getJobTracker().getTip( attemptId.getTaskID()); TaskStatus ts = mr.getJobTrackerRunner().getJobTracker().getTaskStatus( attemptId); validateAttempt(tip, attemptId, ts, i == taskWithCleanup); if (i == taskWithCleanup) { // validate second attempt of the task attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, i), 1); ts = mr.getJobTrackerRunner().getJobTracker().getTaskStatus(attemptId); validateAttempt(tip, attemptId, ts, false); } } }