/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    out.writeObject(file());
    out.writeLong(start());
    out.writeLong(length());
}
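The `Externalizable` contract requires a matching `readExternal()` that consumes the fields in exactly the order they were written. A minimal sketch, assuming the class stores its state in `file`, `start` and `len` fields (the names and the `URI` type are assumptions inferred from the accessors above):

```java
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    // Read fields in the same order writeExternal() wrote them.
    // Field names and the URI type are assumptions based on the accessors.
    file = (URI)in.readObject();
    start = in.readLong();
    len = in.readLong();
}
```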
/**
 * Sort input splits by length.
 *
 * @param splits Splits.
 * @return Sorted splits.
 */
public static List<HadoopInputSplit> sortInputSplits(Collection<HadoopInputSplit> splits) {
    int id = 0;

    TreeSet<SplitSortWrapper> sortedSplits = new TreeSet<>();

    for (HadoopInputSplit split : splits) {
        long len = split instanceof HadoopFileBlock ? ((HadoopFileBlock)split).length() : 0;

        sortedSplits.add(new SplitSortWrapper(id++, split, len));
    }

    ArrayList<HadoopInputSplit> res = new ArrayList<>(sortedSplits.size());

    for (SplitSortWrapper sortedSplit : sortedSplits)
        res.add(sortedSplit.split);

    return res;
}
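The ordering lives entirely in `SplitSortWrapper`. A plausible sketch, assuming the intent is to place longer splits first (so the heaviest work is scheduled earliest) and that the insertion `id` serves as a tie-breaker so the `TreeSet` never drops equal-length splits:

```java
/** Wrapper pairing a split with its length for deterministic, descending-length ordering. */
private static class SplitSortWrapper implements Comparable<SplitSortWrapper> {
    /** Unique id used only to break length ties. */
    private final int id;

    /** Wrapped split. */
    private final HadoopInputSplit split;

    /** Split length in bytes (0 for non-file splits). */
    private final long len;

    SplitSortWrapper(int id, HadoopInputSplit split, long len) {
        this.id = id;
        this.split = split;
        this.len = len;
    }

    /** {@inheritDoc} */
    @Override public int compareTo(SplitSortWrapper other) {
        // Longer splits first; fall back to id so distinct wrappers never compare equal.
        int res = Long.compare(other.len, len);

        return res != 0 ? res : Integer.compare(id, other.id);
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object obj) {
        return obj instanceof SplitSortWrapper && compareTo((SplitSortWrapper)obj) == 0;
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        return 31 * id + Long.hashCode(len);
    }
}
```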
/**
 * Get affinity nodes for the given input split.
 * <p>
 * Order in the returned collection <b>is</b> significant: nodes hosting more of the split's data
 * go first, so the first nodes in the collection are considered preferable for scheduling.
 *
 * @param split Split.
 * @param top Topology.
 * @return Affinity nodes.
 * @throws IgniteCheckedException If failed.
 */
private Collection<UUID> affinityNodesForSplit(HadoopInputSplit split, HadoopMapReducePlanTopology top)
    throws IgniteCheckedException {
    Collection<UUID> igfsNodeIds = igfsAffinityNodesForSplit(split);

    if (igfsNodeIds != null)
        return igfsNodeIds;

    Map<NodeIdAndLength, UUID> res = new TreeMap<>();

    for (String host : split.hosts()) {
        long len = split instanceof HadoopFileBlock ? ((HadoopFileBlock)split).length() : 0L;

        HadoopMapReducePlanGroup grp = top.groupForHost(host);

        if (grp != null) {
            for (int i = 0; i < grp.nodeCount(); i++) {
                UUID nodeId = grp.nodeId(i);

                res.put(new NodeIdAndLength(nodeId, len), nodeId);
            }
        }
    }

    return new LinkedHashSet<>(res.values());
}
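The `TreeMap` keyed by `NodeIdAndLength` is what makes the resulting `LinkedHashSet` iterate in "more data first" order. A sketch of that key class, assuming it sorts by length descending and breaks ties on node id (the exact tie-break rule is an assumption):

```java
/** Composite key ordering candidate nodes by the amount of locally hosted split data, descending. */
private static class NodeIdAndLength implements Comparable<NodeIdAndLength> {
    /** Node ID. */
    private final UUID id;

    /** Length of split data hosted on this node, in bytes. */
    private final long len;

    NodeIdAndLength(UUID id, long len) {
        this.id = id;
        this.len = len;
    }

    /** {@inheritDoc} */
    @Override public int compareTo(NodeIdAndLength other) {
        // Nodes with more local data sort first; node id keeps distinct nodes distinct.
        int res = Long.compare(other.len, len);

        return res != 0 ? res : id.compareTo(other.id);
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object obj) {
        return obj instanceof NodeIdAndLength && id.equals(((NodeIdAndLength)obj).id);
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        return id.hashCode();
    }
}
```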
/** {@inheritDoc} */
@Override public InputSplit getInputSplit() {
    if (inputSplit == null) {
        HadoopInputSplit split = ctx.taskInfo().inputSplit();

        if (split == null)
            return null;

        if (split instanceof HadoopFileBlock) {
            HadoopFileBlock fileBlock = (HadoopFileBlock)split;

            inputSplit = new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null);
        }
        else {
            try {
                inputSplit = (InputSplit)((HadoopV2TaskContext)ctx).getNativeSplit(split);
            }
            catch (IgniteCheckedException e) {
                throw new IllegalStateException(e);
            }
        }
    }

    return inputSplit;
}
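From the Hadoop side the result is consumed as an ordinary `InputSplit`, usually a `FileSplit`. A hedged usage sketch (the `SplitInfo` helper and `describe` method are hypothetical, introduced only to show how the file coordinates come back out):

```java
import java.io.IOException;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

/** Illustrative helper: extract file coordinates from whatever split a task context hands back. */
public final class SplitInfo {
    private SplitInfo() {
        // No instances.
    }

    /** Formats a split as "path:start+length", falling back to toString() for non-file splits. */
    public static String describe(InputSplit split) throws IOException, InterruptedException {
        if (split instanceof FileSplit) {
            FileSplit fileSplit = (FileSplit)split;

            return fileSplit.getPath() + ":" + fileSplit.getStart() + "+" + fileSplit.getLength();
        }

        // The generic InputSplit.getLength() declares checked exceptions, hence the throws clause.
        return split.toString() + " (" + split.getLength() + " bytes)";
    }
}
```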
HadoopFileBlock block = (HadoopFileBlock)split;

nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS);
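`EMPTY_HOSTS` is presumably a shared constant rather than a per-call allocation; passing it (or `null`, as the `getInputSplit()` path above does) tells `FileSplit` there are no host locality hints. A sketch of the assumed declaration:

```java
/** Shared empty array: the native split carries no host locality hints. */
private static final String[] EMPTY_HOSTS = new String[0];
```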