/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    msgCnt = in.readLong();
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    msgId = in.readLong();
}
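// The three readers above imply mirrored writeExternal methods on the same
// message classes. A minimal sketch for the msgId-carrying message (a
// hypothetical counterpart, not the actual Ignite source): fields must be
// written in exactly the order readExternal consumes them, jobId first.
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    jobId.writeExternal(out); // Delegates to HadoopJobId's own Externalizable logic.

    out.writeLong(msgId);
}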
/** {@inheritDoc} */
@Override public HadoopJobId nextJobId() {
    return new HadoopJobId(ctx.localNodeId(), idCtr.incrementAndGet());
}
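// The factory above builds cluster-wide unique job IDs with no distributed
// coordination: the originating node's UUID plus a node-local atomic counter.
// A self-contained sketch of the same pattern (illustrative names only, not
// the actual Ignite class):
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;

final class JobIdSketch {
    private final UUID nodeId = UUID.randomUUID();          // Stands in for ctx.localNodeId().
    private final AtomicInteger idCtr = new AtomicInteger();

    /** @return Globally unique (node UUID, local counter) pair. */
    String nextJobId() {
        return nodeId + "_" + idCtr.incrementAndGet();
    }
}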
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    msgId = in.readLong();
    reducer = in.readInt();
    off = in.readInt();
    buf = U.readByteArray(in);
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    reducer = in.readInt();
    cnt = in.readInt();
    buf = U.readByteArray(in);

    bufLen = buf != null ? buf.length : 0; // Derived locally, not read from the stream.

    dataLen = in.readInt();
}
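// Hypothetical mirrored writer for the reader above. Since bufLen is
// recomputed on the read side rather than serialized, the writer emits only
// the fields readExternal actually consumes, in the same order
// (U.writeByteArray is assumed to be the counterpart of U.readByteArray).
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    jobId.writeExternal(out);

    out.writeInt(reducer);
    out.writeInt(cnt);
    U.writeByteArray(out, buf);
    out.writeInt(dataLen);
}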
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    jobInfo = (HadoopJobInfo)in.readObject();
    totalReducersCnt = in.readInt();
    locReducers = U.readIntArray(in);
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    jobInfo = (HadoopJobInfo)in.readObject();
    tasks = U.readCollection(in);
}
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobId = new HadoopJobId();

    jobId.readExternal(in);

    jobPhase = (HadoopJobPhase)in.readObject();
    reducersAddrs = (HadoopProcessDescriptor[])U.readArray(in);
}
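// Hypothetical mirrored writer for the reader above; U.writeArray is assumed
// to be the counterpart of the U.readArray call on the read side.
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    jobId.writeExternal(out);

    out.writeObject(jobPhase);
    U.writeArray(out, reducersAddrs);
}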
/** {@inheritDoc} */
@Override public Boolean run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    UUID nodeId = UUID.fromString(args.<String>get(0));
    Integer id = args.get(1);

    assert nodeId != null;
    assert id != null;

    HadoopJobId jobId = new HadoopJobId(nodeId, id);

    return hadoop.kill(jobId);
}
}
/** {@inheritDoc} */
@Override public HadoopCounters run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    UUID nodeId = UUID.fromString(args.<String>get(0));
    Integer id = args.get(1);

    assert nodeId != null;
    assert id != null;

    return hadoop.counters(new HadoopJobId(nodeId, id));
}
}
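// The positional reads above (args.<String>get(0) parsed as a UUID,
// args.get(1) as an Integer) suggest how call sites pack the arguments.
// A hedged sketch, assuming a varargs-style HadoopProtocolTaskArguments
// constructor and HadoopJobId accessors globalId()/localId():
HadoopProtocolTaskArguments args =
    new HadoopProtocolTaskArguments(jobId.globalId().toString(), jobId.localId());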
/**
 * @throws Exception If failed.
 */
@Test
public void testMapperException() throws Exception {
    prepareFile("/testFile", 1000);

    Configuration cfg = new Configuration();

    cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());

    Job job = Job.getInstance(cfg);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(FailMapper.class);

    job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);

    FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@/"));
    FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@/output/"));

    job.setJarByClass(getClass());

    final IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 3),
        createJobInfo(job.getConfiguration(), null));

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fut.get();

            return null;
        }
    }, IgniteCheckedException.class, null);
}
/** {@inheritDoc} */
@Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    UUID nodeId = UUID.fromString(args.<String>get(0));
    Integer id = args.get(1);
    HadoopDefaultJobInfo info = args.get(2);

    assert nodeId != null;
    assert id != null;
    assert info != null;

    HadoopJobId jobId = new HadoopJobId(nodeId, id);

    hadoop.submit(jobId, info);

    HadoopJobStatus res = hadoop.status(jobId);

    if (res == null) // Submission failed.
        res = new HadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1);

    return res;
}
}
/**
 * @throws Exception If failed.
 */
@Test
public void testMapRun() throws Exception {
    int lineCnt = 10000;
    String fileName = "/testFile";

    prepareFile(fileName, lineCnt);

    totalLineCnt.set(0);
    taskWorkDirs.clear();

    Configuration cfg = new Configuration();

    cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());

    Job job = Job.getInstance(cfg);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestMapper.class);

    job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);

    FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@/"));
    FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@/output/"));

    job.setJarByClass(getClass());

    IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
        createJobInfo(job.getConfiguration(), null));

    fut.get();

    assertEquals(lineCnt, totalLineCnt.get());
    assertEquals(32, taskWorkDirs.size());
}
/**
 * Creates WordCount hadoop job for API v2.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception If fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    Job job = Job.getInstance();

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    HadoopWordCount2.setTasksClasses(job, true, true, true, false);

    Configuration conf = job.getConfiguration();

    setupFileSystems(conf);

    FileInputFormat.setInputPaths(job, new Path(inFile));
    FileOutputFormat.setOutputPath(job, new Path(outFile));

    job.setJarByClass(HadoopWordCount2.class);

    Job hadoopJob = HadoopWordCount2.getJob(inFile, outFile);

    HadoopDefaultJobInfo jobInfo = createJobInfo(hadoopJob.getConfiguration(), null);

    UUID uuid = new UUID(0, 0);

    HadoopJobId jobId = new HadoopJobId(uuid, 0);

    return jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}
/**
 * @throws Exception If failed.
 */
@Test
public void testSimpleTaskSubmit() throws Exception {
    String testInputFile = "/test";

    prepareTestFile(testInputFile);

    Configuration cfg = new Configuration();

    setupFileSystems(cfg);

    Job job = Job.getInstance(cfg);

    job.setMapperClass(TestMapper.class);
    job.setCombinerClass(TestReducer.class);
    job.setReducerClass(TestReducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setNumReduceTasks(1);

    FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestIgniteInstanceName(0) + "@/" + testInputFile));
    FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestIgniteInstanceName(0) + "@/output"));

    job.setJarByClass(getClass());

    IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
        createJobInfo(job.getConfiguration(), null));

    fut.get();
}
/**
 * @param combiner With combiner.
 * @throws Exception If failed.
 */
public void doTestGrouping(boolean combiner) throws Exception {
    HadoopGroupingTestState.values().clear();

    Job job = Job.getInstance();

    job.setInputFormatClass(InFormat.class);
    job.setOutputFormatClass(OutFormat.class);

    job.setOutputKeyClass(YearTemperature.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(Mapper.class);

    if (combiner) {
        job.setCombinerClass(MyReducer.class);
        job.setNumReduceTasks(0);
        job.setCombinerKeyGroupingComparatorClass(YearComparator.class);
    }
    else {
        job.setReducerClass(MyReducer.class);
        job.setNumReduceTasks(4);
        job.setGroupingComparatorClass(YearComparator.class);
    }

    grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2),
        createJobInfo(job.getConfiguration(), null)).get(30000);

    assertTrue(HadoopGroupingTestState.values().isEmpty());
}
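// Hypothetical shape of a grouping comparator like YearComparator above
// (imports: org.apache.hadoop.io.WritableComparable, WritableComparator):
// it compares only the year component, so every record of one year reaches
// the same reduce() or combine() call regardless of the rest of the
// composite key. YearTemperature.year() is an assumed accessor.
public static class YearGroupingComparator extends WritableComparator {
    protected YearGroupingComparator() {
        super(YearTemperature.class, true); // Instantiate keys for compare().
    }

    /** {@inheritDoc} */
    @Override public int compare(WritableComparable a, WritableComparable b) {
        return Integer.compare(((YearTemperature)a).year(), ((YearTemperature)b).year());
    }
}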
HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 2);
HadoopJobId id = new HadoopJobId(uuid, 1);
/**
 * Creates WordCount hadoop job for API v1.
 *
 * @param inFile Input file name for the job.
 * @param outFile Output file name for the job.
 * @return Hadoop job.
 * @throws Exception If fails.
 */
@Override public HadoopJobEx getHadoopJob(String inFile, String outFile) throws Exception {
    JobConf jobConf = HadoopWordCount1.getJob(inFile, outFile);

    setupFileSystems(jobConf);

    HadoopDefaultJobInfo jobInfo = createJobInfo(jobConf, null);

    UUID uuid = new UUID(0, 0);

    HadoopJobId jobId = new HadoopJobId(uuid, 0);

    return jobInfo.createJob(HadoopV2Job.class, jobId, log, null, new HadoopHelperImpl());
}