/** {@inheritDoc} */
@Override public boolean hasReducer() {
    return reducers() > 0;
}
for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet())
    hadoopCfg.set(e.getKey(), e.getValue());
/** {@inheritDoc} */
@Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop,
    HadoopProtocolTaskArguments args) throws IgniteCheckedException {
    UUID nodeId = UUID.fromString(args.<String>get(0));
    Integer id = args.get(1);
    HadoopDefaultJobInfo info = args.get(2);

    assert nodeId != null;
    assert id != null;
    assert info != null;

    HadoopJobId jobId = new HadoopJobId(nodeId, id);

    hadoop.submit(jobId, info);

    HadoopJobStatus res = hadoop.status(jobId);

    if (res == null) // Submission failed.
        res = new HadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1);

    return res;
}
}
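For orientation, a hedged sketch of the positional argument layout that run() above unpacks. The helper below is hypothetical and not part of the original task; only the ordering and types are taken from the args.get(0..2) calls in the snippet.

/** Hypothetical helper illustrating the argument order consumed by run() above. */
static Object[] submitJobArgs(UUID nodeId, int jobCnt, HadoopDefaultJobInfo info) {
    return new Object[] {
        nodeId.toString(), // index 0: node ID as a String, parsed back with UUID.fromString().
        jobCnt,            // index 1: Integer job counter used to build the HadoopJobId.
        info               // index 2: job info carrying job name, user and configuration properties.
    };
}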
/**
 * Creates WordCount hadoop job for API v2.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception if fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    Job job = Job.getInstance();

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    HadoopWordCount2.setTasksClasses(job, true, true, true, false);

    Configuration conf = job.getConfiguration();

    setupFileSystems(conf);

    FileInputFormat.setInputPaths(job, new Path(inFile));
    FileOutputFormat.setOutputPath(job, new Path(outFile));

    job.setJarByClass(HadoopWordCount2.class);

    Job hadoopJob = HadoopWordCount2.getJob(inFile, outFile);

    HadoopDefaultJobInfo jobInfo = createJobInfo(hadoopJob.getConfiguration(), null);

    UUID uuid = new UUID(0, 0);

    HadoopJobId jobId = new HadoopJobId(uuid, 0);

    return jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}
props.put(entry.getKey(), entry.getValue());

return new HadoopDefaultJobInfo(jobConf.getJobName(), jobConf.getUser(), hasCombiner, numReduces, props,
    credentials);
for (Map.Entry<String, String> e : jobInfo.properties().entrySet())
    jobConf.set(e.getKey(), e.getValue());
HadoopJobEx job = info.createJob(HadoopV2Job.class, id, log, null, new HadoopHelperImpl());
/**
 * Creates DataInput to read JobConf.
 *
 * @param job Job.
 * @return DataInput with JobConf.
 * @throws IgniteCheckedException If failed.
 */
private static DataInput jobConfDataInput(HadoopJobEx job) throws IgniteCheckedException {
    JobConf jobConf = new JobConf();

    for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)job.info()).properties().entrySet())
        jobConf.set(e.getKey(), e.getValue());

    ByteArrayOutputStream buf = new ByteArrayOutputStream();

    try {
        jobConf.write(new DataOutputStream(buf));
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    return new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
}
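As a counterpart, a minimal sketch of how such a DataInput could be read back. It is not part of the original code and assumes only Hadoop's Writable contract on Configuration/JobConf, where readFields(DataInput) mirrors the write(DataOutput) call above.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;

/** Hypothetical reader-side counterpart to jobConfDataInput(). */
final class JobConfReadSketch {
    /** Rebuilds a JobConf from a stream produced by jobConf.write(...). */
    static JobConf readJobConf(DataInput in) throws IOException {
        JobConf jobConf = new JobConf();

        jobConf.readFields(in); // Writable counterpart of the write(...) call above.

        return jobConf;
    }
}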
/**
 * Creates WordCount hadoop job for API v1.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception If fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    JobConf jobConf = HadoopWordCount1.getJob(inFile, outFile);

    setupFileSystems(jobConf);

    HadoopDefaultJobInfo jobInfo = createJobInfo(jobConf, null);

    UUID uuid = new UUID(0, 0);

    HadoopJobId jobId = new HadoopJobId(uuid, 0);

    return jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}
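An illustrative check of the job produced by getHadoopJob(); this sketch is an assumption for orientation only and relies solely on calls already visible in this section (job.info(), properties(), reducers(), hasReducer()).

// Hypothetical inspection of the created job; not taken from the original file.
HadoopJobEx job = getHadoopJob(inFile, outFile);

HadoopDefaultJobInfo info = (HadoopDefaultJobInfo)job.info();

// The job info carries the configuration as plain properties, and hasReducer()
// is defined via reducers() (see the accessor shown earlier in this section).
assert info.properties() != null;
assert info.hasReducer() == (info.reducers() > 0);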