private void updateMapRedStatsJson(MapRedStats stats, RunningJob rj) throws IOException, JSONException {
  if (statsJSON == null) {
    statsJSON = new JSONObject();
  }
  if (stats != null) {
    if (stats.getNumMap() >= 0) {
      statsJSON.put(NUMBER_OF_MAPPERS, stats.getNumMap());
    }
    if (stats.getNumReduce() >= 0) {
      statsJSON.put(NUMBER_OF_REDUCERS, stats.getNumReduce());
    }
    if (stats.getCounters() != null) {
      statsJSON.put(COUNTERS, getCountersJson(stats.getCounters()));
    }
  }
  if (rj != null) {
    statsJSON.put(JOB_ID, rj.getID().toString());
    statsJSON.put(JOB_FILE, rj.getJobFile());
    statsJSON.put(TRACKING_URL, rj.getTrackingURL());
    statsJSON.put(MAP_PROGRESS, Math.round(rj.mapProgress() * 100));
    statsJSON.put(REDUCE_PROGRESS, Math.round(rj.reduceProgress() * 100));
    statsJSON.put(CLEANUP_PROGRESS, Math.round(rj.cleanupProgress() * 100));
    statsJSON.put(SETUP_PROGRESS, Math.round(rj.setupProgress() * 100));
    statsJSON.put(COMPLETE, rj.isComplete());
    statsJSON.put(SUCCESSFUL, rj.isSuccessful());
  }
}
if (!rj.isSuccessful()) {
  throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor")
      + " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
}
txnHandler.setHadoopJobId(rj.getID().toString(), id);
rj.waitForCompletion();
if (!rj.isSuccessful()) {
  // Parenthesize the ternary so the "Major"/"Minor" prefix is actually prepended;
  // without the parentheses, the string concatenation binds only to the else branch.
  throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor")
      + " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
}
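The fix above relies only on Java operator precedence: the conditional operator binds more loosely than `+`. A minimal, standalone sketch (hypothetical class and values, not taken from the quoted sources) shows why the unparenthesized form drops the message:

public class TernaryPrecedenceDemo {
  public static void main(String[] args) {
    boolean major = true;
    // Without parentheses, '+' binds to the else branch only, so the whole
    // expression evaluates to just "Major" when the condition is true.
    String wrong = major ? "Major" : "Minor" + " compactor job failed";
    // With parentheses, the prefix is always concatenated with the message.
    String right = (major ? "Major" : "Minor") + " compactor job failed";
    System.out.println(wrong); // prints: Major
    System.out.println(right); // prints: Major compactor job failed
  }
}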
@Test @SuppressWarnings("deprecation") public void shoudBeValidMapReduceEvaluation() throws Exception { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } }
@Test @SuppressWarnings("deprecation") public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } }
public void run(JobConf conf) throws Exception {
  _runningJob = new JobClient(conf).submitJob(conf);
  info("See " + _runningJob.getTrackingURL() + " for details.");
  _runningJob.waitForCompletion();
  if (!_runningJob.isSuccessful()) {
    throw new Exception("Hadoop job:" + getId() + " failed!");
  }
  // dump all counters
  Counters counters = _runningJob.getCounters();
  for (String groupName : counters.getGroupNames()) {
    Counters.Group group = counters.getGroup(groupName);
    info("Group: " + group.getDisplayName());
    for (Counter counter : group) {
      info(counter.getDisplayName() + ":\t" + counter.getValue());
    }
  }
}
trackingURL = runningJob.getTrackingURL();
isComplete = runningJob.isComplete();
isSuccessful = runningJob.isSuccessful();
mapProgress = runningJob.mapProgress();
reduceProgress = runningJob.reduceProgress();
@Override
protected void runTestOnTable(Table table) throws IOException {
  JobConf jobConf = null;
  try {
    LOG.info("Before map/reduce startup");
    jobConf = new JobConf(UTIL.getConfiguration(), TestTableMapReduce.class);
    jobConf.setJobName("process column contents");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(table.getName().getNameAsString(),
        Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class,
        ImmutableBytesWritable.class, Put.class, jobConf);
    TableMapReduceUtil.initTableReduceJob(table.getName().getNameAsString(),
        IdentityTableReduce.class, jobConf);
    LOG.info("Started " + table.getName());
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
    LOG.info("After map/reduce completion");
    // verify map-reduce results
    verify(table.getName());
  } finally {
    if (jobConf != null) {
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
    }
  }
}
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
success = rj.isSuccessful();
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());
  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans),
      Mapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true,
      restoreDir);
  TableMapReduceUtil.addDependencyJars(job);
  job.setReducerClass(Reducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());
  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
private boolean updateJobState() throws IOException {
  if (jobProgress == null) {
    jobProgress = new MapReduceJobState(rj, jobClient.getMapTaskReports(jobId),
        jobClient.getReduceTaskReports(jobId));
    return true;
  }
  boolean complete = rj.isComplete();
  boolean successful = rj.isSuccessful();
  float mapProgress = rj.mapProgress();
  float reduceProgress = rj.reduceProgress();
  boolean update = !(jobProgress.isComplete() == complete
      && jobProgress.isSuccessful() == successful
      && AmbroseHiveUtil.isEqual(jobProgress.getMapProgress(), mapProgress)
      && AmbroseHiveUtil.isEqual(jobProgress.getReduceProgress(), reduceProgress));
  // do progress report only if necessary
  if (update) {
    jobProgress = new MapReduceJobState(rj, jobClient.getMapTaskReports(jobId),
        jobClient.getReduceTaskReports(jobId));
    jobProgress.setJobLastUpdateTime(System.currentTimeMillis());
  }
  return update;
}
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  Configuration conf = UTIL.getConfiguration();
  final JobConf job = new JobConf(conf);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);
  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
  Assert.assertTrue(job.isSuccessful());
} finally {
  if (!shutdownCluster) {
protected boolean internalNonBlockingIsSuccessful() throws IOException {
  return runningJob != null && runningJob.isSuccessful();
}
/**
 * Check if the job completed successfully.
 *
 * @return <code>true</code> if the job succeeded, else <code>false</code>.
 * @throws IOException
 */
public boolean isSuccessful() throws IOException {
  ensureState(JobState.RUNNING);
  return info.isSuccessful();
}
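For context, here is a minimal client-side sketch of the submit-and-poll pattern the snippets above share. The class name, job name, and error handling are hypothetical placeholders, not taken from the quoted projects; the Hadoop calls themselves (JobClient.submitJob, RunningJob.waitForCompletion, RunningJob.isSuccessful) are the standard mapred API.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SubmitAndCheck {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(SubmitAndCheck.class);
    conf.setJobName("submit-and-check"); // hypothetical job name
    // ... mapper/reducer/input/output configuration goes here ...
    RunningJob rj = new JobClient(conf).submitJob(conf);
    System.out.println("Tracking URL: " + rj.getTrackingURL());
    // Block until the job finishes, then check the outcome;
    // isSuccessful() is only meaningful once the job is complete.
    rj.waitForCompletion();
    if (!rj.isSuccessful()) {
      throw new RuntimeException("Job " + rj.getID() + " failed");
    }
  }
}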
public class Hadoop1 extends Configured implements Tool {
  public static void main(String[] args) throws Exception {
    ToolRunner.run(new Hadoop1(), args);
  }

  // Declare 'throws Exception' (as Tool.run allows) so JobClient.runJob's
  // checked IOException compiles without a try/catch.
  public int run(String[] args) throws Exception {
    JobConf job = new JobConf(getConf());
    // rest of your job init code...
    RunningJob rj = JobClient.runJob(job);
    rj.waitForCompletion();
    return rj.isSuccessful() ? 0 : 1;
  }
}
public static boolean isMainSuccessful(RunningJob runningJob) throws IOException {
  boolean succeeded = runningJob.isSuccessful();
  if (succeeded) {
    Counters counters = runningJob.getCounters();
    if (counters != null) {
      Counters.Group group = counters.getGroup(LauncherAMUtils.COUNTER_GROUP);
      if (group != null) {
        succeeded = group.getCounter(LauncherAMUtils.COUNTER_LAUNCHER_ERROR) == 0;
      }
    }
  }
  return succeeded;
}
@Test
public void testKeyMismatch() throws Exception {
  // Set bad MapOutputKeyClass and MapOutputValueClass
  conf.setMapOutputKeyClass(IntWritable.class);
  conf.setMapOutputValueClass(IntWritable.class);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (r_job.isSuccessful()) {
    fail("Oops! The job was supposed to break due to an exception");
  }
}
@Test
public void testValueMismatch() throws Exception {
  conf.setMapOutputKeyClass(Text.class);
  conf.setMapOutputValueClass(IntWritable.class);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (r_job.isSuccessful()) {
    fail("Oops! The job was supposed to break due to an exception");
  }
}