/**
 * Deserializes a {@link FileSplit} from the given stream and wraps it into a file block.
 *
 * @param clsName Input split class name.
 * @param in Input stream.
 * @param hosts Optional hosts.
 * @return File block or {@code null} if it is not a {@link FileSplit} instance.
 * @throws IgniteCheckedException If failed.
 */
@Nullable public static HadoopFileBlock readFileBlock(String clsName, FSDataInputStream in,
    @Nullable String[] hosts) throws IgniteCheckedException {
    // Only FileSplit instances can be mapped to file blocks; anything else is signalled via null.
    if (!FileSplit.class.getName().equals(clsName))
        return null;

    FileSplit fileSplit = U.newInstance(FileSplit.class);

    try {
        fileSplit.readFields(in);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    // Fall back to the shared empty-hosts constant when no hosts were supplied.
    String[] blockHosts = hosts != null ? hosts : EMPTY_HOSTS;

    return new HadoopFileBlock(blockHosts, fileSplit.getPath().toUri(), fileSplit.getStart(),
        fileSplit.getLength());
}
}
/**
 * Deserializes a {@link FileSplit} from the given stream and wraps it into a file block.
 *
 * @param clsName Input split class name.
 * @param in Input stream.
 * @param hosts Optional hosts.
 * @return File block or {@code null} if it is not a {@link FileSplit} instance.
 * @throws IgniteCheckedException If failed.
 */
// Fix: the javadoc documents a possible null return and the sibling overload is annotated
// @Nullable, but this overload was missing the annotation — added for consistency.
@Nullable public static HadoopFileBlock readFileBlock(String clsName, DataInput in,
    @Nullable String[] hosts) throws IgniteCheckedException {
    // Only FileSplit instances can be mapped to file blocks; anything else is signalled via null.
    if (!FileSplit.class.getName().equals(clsName))
        return null;

    FileSplit split = new FileSplit();

    try {
        split.readFields(in);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    if (hosts == null)
        hosts = EMPTY_HOSTS;

    return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength());
}
}
// Native Hadoop file split: carry its locations, file URI, start offset and length
// over into an Ignite file block unchanged.
FileSplit s = (FileSplit)nativeSplit;
res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength()));
/**
 * Asks the job's configured input format for its splits and maps each native split
 * to an Ignite input split.
 *
 * @param jobConf Job configuration.
 * @return Collection of mapped splits.
 * @throws IgniteCheckedException If mapping failed.
 */
public static Collection<HadoopInputSplit> splitJob(JobConf jobConf) throws IgniteCheckedException {
    try {
        InputFormat<?, ?> format = jobConf.getInputFormat();

        assert format != null;

        InputSplit[] nativeSplits = format.getSplits(jobConf, 0);

        Collection<HadoopInputSplit> mapped = new ArrayList<>(nativeSplits.length);

        for (int idx = 0; idx < nativeSplits.length; idx++) {
            InputSplit nativeSplit = nativeSplits[idx];

            if (nativeSplit instanceof FileSplit) {
                // File splits translate directly into file blocks.
                FileSplit fileSplit = (FileSplit)nativeSplit;

                mapped.add(new HadoopFileBlock(fileSplit.getLocations(), fileSplit.getPath().toUri(),
                    fileSplit.getStart(), fileSplit.getLength()));
            }
            else
                // Non-file splits are kept as-is behind a generic wrapper keyed by their index.
                mapped.add(HadoopUtils.wrapSplit(idx, nativeSplit, nativeSplit.getLocations()));
        }

        return mapped;
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
/**
 * Test HDFS splits with Replication == 3.
 *
 * @throws Exception If failed.
 */
@Test public void testHdfsSplitsReplication() throws Exception {
    // Three-node IGFS topology: NODE_1 owns [0, 50), NODE_2 [50, 100), NODE_3 [100, ...).
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    final List<HadoopInputSplit> splits = new ArrayList<>();

    // Each block is replicated on three hosts, mirroring HDFS replication factor 3.
    // NOTE(review): the scheme "hfds://" looks like a typo for "hdfs://"; presumably harmless here
    // since any non-IGFS scheme exercises the non-affinity (HDFS) path — confirm before changing.
    splits.add(new HadoopFileBlock(new String[] { HOST_1, HOST_2, HOST_3 }, URI.create("hfds://" + HOST_1 + "/x"), 0, 50));
    splits.add(new HadoopFileBlock(new String[] { HOST_2, HOST_3, HOST_4 }, URI.create("hfds://" + HOST_2 + "/x"), 50, 100));
    splits.add(new HadoopFileBlock(new String[] { HOST_3, HOST_4, HOST_5 }, URI.create("hfds://" + HOST_3 + "/x"), 100, 37));

    // The following splits belong to hosts that are out of Ignite topology at all.
    // This means that these splits should be assigned to any least loaded modes:
    splits.add(new HadoopFileBlock(new String[] { HOST_4, HOST_5, HOST_1 }, URI.create("hfds://" + HOST_4 + "/x"), 138, 2));
    splits.add(new HadoopFileBlock(new String[] { HOST_5, HOST_1, HOST_2 }, URI.create("hfds://" + HOST_5 + "/x"), 140, 3));

    final int expReducers = 8;

    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);

    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);

    final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);

    checkPlanMappers(plan, splits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
/**
 * Test one HDFS splits.
 *
 * @throws Exception If failed.
 */
@Test public void testHdfsSplitsAffinity() throws Exception {
    // Three-node IGFS topology: NODE_1 owns [0, 50), NODE_2 [50, 100), NODE_3 [100, ...).
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    final List<HadoopInputSplit> splits = new ArrayList<>();

    // Single-replica blocks, each pinned to exactly one host.
    // NOTE(review): the scheme "hfds://" looks like a typo for "hdfs://"; presumably harmless here
    // since any non-IGFS scheme exercises the non-affinity (HDFS) path — confirm before changing.
    splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("hfds://" + HOST_1 + "/x"), 0, 50));
    splits.add(new HadoopFileBlock(new String[] { HOST_2 }, URI.create("hfds://" + HOST_2 + "/x"), 50, 100));
    splits.add(new HadoopFileBlock(new String[] { HOST_3 }, URI.create("hfds://" + HOST_3 + "/x"), 100, 37));

    // The following splits belong to hosts that are out of Ignite topology at all.
    // This means that these splits should be assigned to any least loaded modes:
    splits.add(new HadoopFileBlock(new String[] { HOST_4 }, URI.create("hfds://" + HOST_4 + "/x"), 138, 2));
    splits.add(new HadoopFileBlock(new String[] { HOST_5 }, URI.create("hfds://" + HOST_5 + "/x"), 140, 3));

    final int expReducers = 7;

    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);

    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);

    final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);

    checkPlanMappers(plan, splits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
/**
 * Test one IGFS split being assigned to affinity node.
 *
 * @throws Exception If failed.
 */
@Test public void testOneIgfsSplitAffinity() throws Exception {
    // Three-node IGFS topology: NODE_1 owns [0, 50), NODE_2 [50, 100), NODE_3 [100, ...).
    IgfsMock fsMock = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    // A single IGFS-scheme block fully contained in NODE_1's range.
    List<HadoopInputSplit> splitList = new ArrayList<>();

    splitList.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("igfs://igfs@/file"), 0, 50));

    final int expReducers = 4;

    HadoopPlannerMockJob mockJob = new HadoopPlannerMockJob(splitList, expReducers);

    IgniteHadoopWeightedMapReducePlanner weightedPlanner = createPlanner(fsMock);

    HadoopMapReducePlan plan = weightedPlanner.preparePlan(mockJob, NODES, null);

    // The sole mapper must land on the block's affinity node.
    assert plan.mappers() == 1;
    assert plan.mapperNodeIds().size() == 1;
    assert plan.mapperNodeIds().contains(ID_1);

    checkPlanMappers(plan, splitList, NODES, false/*only 1 split*/);

    checkPlanReducers(plan, NODES, expReducers, false/* because of threshold behavior.*/);
}
// First block: everything written so far except the final byte.
HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1);

// Append one more line so the file grows past the first block.
pw.println("world3 hello3");

// Second block: the remainder of the file, starting right where the first block ends.
HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(), igfs.info(inFile).length() - fileBlock1.length());
// Split the file at offset l: first block covers [0, l), second covers [l, fileLen).
HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, l);
HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, l, fileLen - l);