LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id); txnHandler.setHadoopJobId(rj.getID().toString(), id); rj.waitForCompletion(); if (!rj.isSuccessful()) { throw new IOException(compactionType == CompactionType.MAJOR ? "Major" : "Minor" +
+ " compactionId=" + id, e); rj.waitForCompletion(); if (!rj.isSuccessful()) { throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor") +
public void run(JobConf conf) throws Exception {
  _runningJob = new JobClient(conf).submitJob(conf);
  info("See " + _runningJob.getTrackingURL() + " for details.");
  _runningJob.waitForCompletion();
  if (!_runningJob.isSuccessful()) {
    throw new Exception("Hadoop job:" + getId() + " failed!");
  }
  // Dump all counters.
  Counters counters = _runningJob.getCounters();
  for (String groupName : counters.getGroupNames()) {
    Counters.Group group = counters.getGroup(groupName);
    info("Group: " + group.getDisplayName());
    for (Counter counter : group) {
      info(counter.getDisplayName() + ":\t" + counter.getValue());
    }
  }
}
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());
  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans),
      Mapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true,
      restoreDir);
  TableMapReduceUtil.addDependencyJars(job);
  job.setNumReduceTasks(1); // one reducer to get the final "first" and "last" keys
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());
  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
jobconf.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);
// JobClient.runJob() already blocks until the job finishes; the trailing
// waitForCompletion() is a harmless no-op on the completed job.
JobClient.runJob(jobconf).waitForCompletion();
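The grouping comparator above is one leg of the classic secondary-sort setup. A minimal sketch of the full configuration, assuming hypothetical CompositeKeyComparator and NaturalKeyPartitioner classes alongside the snippet's NaturalKeyGroupingComparator:

JobConf jobconf = new JobConf(new Configuration(), SecondarySort.class); // SecondarySort is illustrative
// Partition and group on the natural key, but sort on the full composite key.
jobconf.setPartitionerClass(NaturalKeyPartitioner.class);          // hypothetical
jobconf.setOutputKeyComparatorClass(CompositeKeyComparator.class); // hypothetical
jobconf.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);
JobClient.runJob(jobconf); // blocks until completion, so no explicit wait is needed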
List<RunningJob> runningJobs = new ArrayList<RunningJob>();
for (String jobSpec : jobSpecs) {
  // Configure, for example, a params map that gets passed into the MR class's constructor.
  ToolRunner.run(new Configuration(), new MapReduceClass(params, runningJobs), null);
}
for (RunningJob rj : runningJobs) {
  System.err.println("Waiting on job " + rj.getID());
  rj.waitForCompletion();
}
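The loop above assumes MapReduceClass records its submitted job in the shared runningJobs list. A minimal sketch of what such a Tool could look like; only the names come from the snippet, the constructor and body are assumptions:

class MapReduceClass extends Configured implements Tool {
  private final Map<String, String> params;    // job parameters (assumed shape)
  private final List<RunningJob> runningJobs;  // shared handle list from the caller

  MapReduceClass(Map<String, String> params, List<RunningJob> runningJobs) {
    this.params = params;
    this.runningJobs = runningJobs;
  }

  @Override
  public int run(String[] args) throws Exception {
    JobConf job = new JobConf(getConf(), MapReduceClass.class);
    // ... configure mapper, reducer, and paths from params ...
    // Submit without blocking; the caller waits on the returned handle.
    runningJobs.add(new JobClient(job).submitJob(job));
    return 0;
  }
}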
public class Hadoop1 extends Configured implements Tool {
  public static void main(String[] args) throws Exception {
    ToolRunner.run(new Hadoop1(), args);
  }

  // Must declare throws: JobClient.runJob() and isSuccessful() throw IOException.
  public int run(String[] args) throws Exception {
    JobConf job = new JobConf(getConf());
    // rest of your job init code...
    RunningJob rj = JobClient.runJob(job);
    rj.waitForCompletion();
    return rj.isSuccessful() ? 0 : 1;
  }
}
void runJobAsUser(final JobConf job, UserGroupInformation ugi) throws Exception {
  RunningJob rj = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
    public RunningJob run() throws IOException {
      return JobClient.runJob(job);
    }
  });
  rj.waitForCompletion();
  assertEquals("SUCCEEDED", JobStatus.getJobRunState(rj.getJobState()));
}
@Override
public int run(String[] args) throws Exception {
  // Set up the base conf:
  JobConf conf = new JobConf(getConf(), WARCStatsTool.class);
  // Get the job configuration:
  this.createJobConf(conf, args);
  // Submit it:
  JobClient client = new JobClient(conf);
  RunningJob job = client.submitJob(conf);
  // And await completion before exiting...
  job.waitForCompletion();
  return 0;
}
public void testCommitFail() throws IOException {
  final Path inDir = new Path(rootDir, "./input");
  final Path outDir = new Path(rootDir, "./output");
  JobConf jobConf = createJobConf();
  jobConf.setMaxMapAttempts(1);
  jobConf.setOutputCommitter(CommitterWithCommitFail.class);
  RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
  rJob.waitForCompletion();
  assertEquals(JobStatus.FAILED, rJob.getJobState());
}
private void testFailCommitter(Class<? extends OutputCommitter> theClass, JobConf jobConf)
    throws IOException {
  jobConf.setOutputCommitter(theClass);
  RunningJob job = UtilsForTests.runJob(jobConf, inDir, outDir);
  // Wait for the job to finish.
  job.waitForCompletion();
  assertEquals(JobStatus.FAILED, job.getJobState());
}
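Both commit-failure tests rely on a committer that throws during commit. A minimal sketch of what a class like CommitterWithCommitFail could be, assuming it extends the old-API FileOutputCommitter (the actual test class may differ):

static class CommitterWithCommitFail extends FileOutputCommitter {
  @Override
  public void commitTask(TaskAttemptContext context) throws IOException {
    super.commitTask(context);
    // Throw after the real commit work so the attempt, and with
    // setMaxMapAttempts(1), the whole job, ends up FAILED.
    throw new IOException("forced commit failure");
  }
}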
public static void run1(Map<String, String> path) throws IOException {
  JobConf conf = Recommend.config();
  String input = path.get("Step3Input1");
  String output = path.get("Step3Output1");
  HdfsDAO hdfs = new HdfsDAO(Recommend.HDFS, conf);
  hdfs.rmr(output);
  conf.setOutputKeyClass(IntWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(Step31_UserVectorSplitterMapper.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(TextOutputFormat.class);
  FileInputFormat.setInputPaths(conf, new Path(input));
  FileOutputFormat.setOutputPath(conf, new Path(output));
  // JobClient.runJob() blocks until the job completes, so the original
  // "while (!job.isComplete()) { job.waitForCompletion(); }" loop was dead code.
  JobClient.runJob(conf);
}
public static void run2(Map<String, String> path) throws IOException {
  JobConf conf = Recommend.config();
  String input = path.get("Step3Input2");
  String output = path.get("Step3Output2");
  HdfsDAO hdfs = new HdfsDAO(Recommend.HDFS, conf);
  hdfs.rmr(output);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(IntWritable.class);
  conf.setMapperClass(Step32_CooccurrenceColumnWrapperMapper.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(TextOutputFormat.class);
  FileInputFormat.setInputPaths(conf, new Path(input));
  FileOutputFormat.setOutputPath(conf, new Path(output));
  // runJob() blocks until completion; no polling loop is needed.
  JobClient.runJob(conf);
}
public static void run(Map<String, String> path) throws IOException {
  JobConf conf = Recommend.config();
  String input = path.get("Step2Input");
  String output = path.get("Step2Output");
  HdfsDAO hdfs = new HdfsDAO(Recommend.HDFS, conf);
  hdfs.rmr(output);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(IntWritable.class);
  conf.setMapperClass(Step2_UserVectorToCooccurrenceMapper.class);
  conf.setCombinerClass(Step2_UserVectorToConoccurrenceReducer.class);
  conf.setReducerClass(Step2_UserVectorToConoccurrenceReducer.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(TextOutputFormat.class);
  FileInputFormat.setInputPaths(conf, new Path(input));
  FileOutputFormat.setOutputPath(conf, new Path(output));
  // runJob() blocks until completion; no polling loop is needed.
  JobClient.runJob(conf);
}
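For reference, the two submission styles these snippets mix: blocking runJob() versus explicit submitJob() plus waitForCompletion(). A minimal side-by-side sketch (jobConf stands in for any fully configured JobConf):

// Blocking: submits, polls and prints progress, throws IOException on failure.
RunningJob finished = JobClient.runJob(jobConf);

// Non-blocking: returns immediately; wait explicitly, then check the outcome.
JobClient client = new JobClient(jobConf);
RunningJob handle = client.submitJob(jobConf);
handle.waitForCompletion();
if (!handle.isSuccessful()) {
  throw new IOException("Job " + handle.getID() + " failed");
}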
/**
 * Test {@link Reporter}'s progress for a map-only job.
 * This will make sure that only the map phase decides the attempt's progress.
 */
@SuppressWarnings("deprecation")
@Test
public void testReporterProgressForMapOnlyJob() throws IOException {
  Path test = new Path(testRootTempDir, "testReporterProgressForMapOnlyJob");
  JobConf conf = new JobConf();
  conf.setMapperClass(ProgressTesterMapper.class);
  conf.setMapOutputKeyClass(Text.class);
  // Fail early.
  conf.setMaxMapAttempts(1);
  conf.setMaxReduceAttempts(0);
  RunningJob job = UtilsForTests.runJob(conf, new Path(test, "in"), new Path(test, "out"),
      1, 0, INPUT);
  job.waitForCompletion();
  assertTrue("Job failed", job.isSuccessful());
}
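ProgressTesterMapper is referenced but not shown. A sketch of a minimal old-API mapper that drives attempt progress through the Reporter; this body is an assumption, not the test's actual source:

static class ProgressTesterMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text> {
  @Override
  public void map(LongWritable key, Text value, OutputCollector<Text, Text> out,
      Reporter reporter) throws IOException {
    reporter.setStatus("mapping " + key); // status string shown for the attempt
    reporter.progress();                  // explicit liveness/progress signal
    out.collect(value, value);
  }
}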
private static void executeJavaMapReduce(String[] args)
    throws IOException, InterruptedException {
  JobConf jConf = createSleepMapperReducerJobConf();
  final Path input = new Path(args[1]);
  FileInputFormat.setInputPaths(jConf, input);
  FileOutputFormat.setOutputPath(jConf, new Path(args[2]));
  writeToFile(input, jConf, "dummy\n", "data.txt");
  JobClient jc = new JobClient(jConf);
  System.out.println("Submitting MR job");
  RunningJob job = jc.submitJob(jConf);
  System.out.println("Submitted job " + job.getID().toString());
  writeToFile(input, jConf, job.getID().toString(), JOB_ID_FILE_NAME);
  job.waitForCompletion();
  jc.monitorAndPrintJob(jConf, job);
  if (job.getJobState() != JobStatus.SUCCEEDED) {
    System.err.println(job.getJobState() + " job state instead of " + JobStatus.SUCCEEDED);
    System.exit(-1);
  }
}