/** {@inheritDoc} */
@Override public String user() {
    return info().user();
}

/** {@inheritDoc} */
@Override public boolean hasCombiner() {
    return info().hasCombiner();
}

/** {@inheritDoc} */
@Override public int reducers() {
    return info().reducers();
}

/** {@inheritDoc} */
@Override public String jobName() {
    return info().jobName();
}

/** {@inheritDoc} */
@Nullable @Override public String property(String name) {
    return info().property(name);
}

/** {@inheritDoc} */
@Override public boolean hasReducer() {
    return info().hasReducer();
}
/**
 * @param maps Maps.
 * @param idx Index.
 * @return Map.
 */
private HadoopMultimap getOrCreateMap(AtomicReferenceArray<HadoopMultimap> maps, int idx) {
    HadoopMultimap map = maps.get(idx);

    if (map == null) { // Create new map.
        map = get(job.info(), SHUFFLE_REDUCER_NO_SORTING, false) ?
            new HadoopConcurrentHashMultimap(job.info(), mem, get(job.info(), PARTITION_HASHMAP_SIZE, 8 * 1024)) :
            new HadoopSkipList(job.info(), mem);

        if (!maps.compareAndSet(idx, null, map)) {
            map.close(); // Lost the CAS race: discard our map and use the winner's.

            return maps.get(idx);
        }
    }

    return map;
}
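// Illustrative sketch, not from the original sources: the same CAS-based
// get-or-create idiom as getOrCreateMap() above, shown with a hypothetical
// generic 'LazySlots' holder. The loser of the compareAndSet race closes its
// freshly created instance and adopts the winner's, so each slot ends up
// holding at most one live object.
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Supplier;

class LazySlots<R extends AutoCloseable> {
    private final AtomicReferenceArray<R> slots;

    LazySlots(int size) {
        slots = new AtomicReferenceArray<>(size);
    }

    R getOrCreate(int idx, Supplier<R> factory) throws Exception {
        R cur = slots.get(idx);

        if (cur == null) {
            R created = factory.get();

            if (!slots.compareAndSet(idx, null, created)) {
                created.close(); // Lost the race: discard ours, use the winner's.

                return slots.get(idx);
            }

            cur = created;
        }

        return cur;
    }
}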
/**
 * @param ctx Task context.
 * @return Task output.
 * @throws IgniteCheckedException If failed.
 */
private HadoopTaskOutput createOutputInternal(HadoopTaskContext ctx) throws IgniteCheckedException {
    switch (ctx.taskInfo().type()) {
        case SETUP:
        case REDUCE:
        case COMMIT:
        case ABORT:
            return null;

        case MAP:
            if (job.info().hasCombiner()) {
                assert combinerInput == null;

                combinerInput = get(job.info(), SHUFFLE_COMBINER_NO_SORTING, false) ?
                    new HadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024)) :
                    new HadoopSkipList(job.info(), mem); // TODO replace with red-black tree

                return combinerInput.startAdding(ctx);
            }

            // No combiner configured: fall through to plain output creation.

        default:
            return createOutput(ctx);
    }
}
Configuration hadoopCfg = HadoopUtils.safeCreateConfiguration();

final HadoopJobInfo jobInfo = job.info();
/**
 * Creates DataInput to read JobConf.
 *
 * @param job Job.
 * @return DataInput with JobConf.
 * @throws IgniteCheckedException If failed.
 */
private static DataInput jobConfDataInput(HadoopJobEx job) throws IgniteCheckedException {
    JobConf jobConf = new JobConf();

    for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)job.info()).properties().entrySet())
        jobConf.set(e.getKey(), e.getValue());

    ByteArrayOutputStream buf = new ByteArrayOutputStream();

    try {
        jobConf.write(new DataOutputStream(buf));
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    return new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
}
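// Hedged usage sketch, not from the original sources: JobConf extends
// Configuration, which implements Writable, so the stream produced by
// jobConfDataInput() can be read straight back with readFields(), yielding
// a JobConf carrying the same properties that were serialized above.
// 'readJobConf' is a hypothetical helper introduced only for illustration.
private static JobConf readJobConf(HadoopJobEx job) throws IgniteCheckedException {
    JobConf restored = new JobConf();

    try {
        restored.readFields(jobConfDataInput(job));
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    return restored;
}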
/**
 * @param taskCtx Task context.
 * @return Output.
 * @throws IgniteCheckedException If failed.
 */
public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    switch (taskCtx.taskInfo().type()) {
        case MAP:
            assert !job.info().hasCombiner() : "Output creation is allowed only if no combiner is defined.";

            // Fall-through: map output without a combiner is created the same way as combine output.
        case COMBINE:
            return new PartitionedOutput(taskCtx);

        default:
            throw new IllegalStateException("Illegal type: " + taskCtx.taskInfo().type());
    }
}
this.embedded = embedded;

boolean stripeMappers0 = get(job.info(), SHUFFLE_MAPPER_STRIPED_OUTPUT, true);

msgSize = get(job.info(), SHUFFLE_MSG_SIZE, DFLT_SHUFFLE_MSG_SIZE);
msgGzip = get(job.info(), SHUFFLE_MSG_GZIP, DFLT_SHUFFLE_MSG_GZIP);

msgs = new HadoopShuffleMessage[rmtMapsSize];

throttle = get(job.info(), SHUFFLE_JOB_THROTTLE, 0);
/**
 * Sends prepare request to remote process.
 *
 * @param proc Process to send request to.
 * @param job Job.
 * @param plan Map reduce plan.
 */
private void prepareForJob(HadoopProcess proc, HadoopJobEx job, HadoopMapReducePlan plan) {
    try {
        comm.sendMessage(proc.descriptor(), new HadoopPrepareForJobRequest(job.id(), job.info(),
            plan.reducers(), plan.reducers(ctx.localNodeId())));
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send job prepare request to remote process [proc=" + proc + ", job=" + job +
            ", plan=" + plan + ']', e);

        proc.terminate();
    }
}
/**
 * Sends execution request to remote node.
 *
 * @param proc Process to send request to.
 * @param job Job instance.
 * @param tasks Collection of tasks to execute in started process.
 * @throws IgniteCheckedException If failed to send the request.
 */
private void sendExecutionRequest(HadoopProcess proc, HadoopJobEx job, Collection<HadoopTaskInfo> tasks)
    throws IgniteCheckedException {
    // Must synchronize since concurrent process crash may happen and will receive onConnectionLost().
    proc.lock();

    try {
        if (proc.terminated()) {
            notifyTasksFailed(tasks, CRASHED, null);

            return;
        }

        HadoopTaskExecutionRequest req = new HadoopTaskExecutionRequest();

        req.jobId(job.id());
        req.jobInfo(job.info());
        req.tasks(tasks);

        comm.sendMessage(proc.descriptor(), req);
    }
    finally {
        proc.unlock();
    }
}
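// Illustrative sketch, not from the original sources: the lock-and-check
// idiom used by sendExecutionRequest(), reduced to a hypothetical 'Peer'
// type. Taking the lock before the terminated check closes the window in
// which a concurrent crash handler could mark the peer dead between the
// check and the send.
import java.util.concurrent.locks.ReentrantLock;

class Peer {
    private final ReentrantLock lock = new ReentrantLock();

    private boolean terminated;

    /** Invoked by the crash handler; takes the same lock as trySend(). */
    void markTerminated() {
        lock.lock();

        try {
            terminated = true;
        }
        finally {
            lock.unlock();
        }
    }

    /** @return {@code false} if the peer already died and nothing was sent. */
    boolean trySend(Runnable send) {
        lock.lock();

        try {
            if (terminated)
                return false;

            send.run();

            return true;
        }
        finally {
            lock.unlock();
        }
    }
}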
outputFormat = reduce || !taskCtx.job().info().hasReducer() ? prepareWriter(jobCtx) : null;
HadoopJobInfo jobInfo = taskCtx.job().info();
collector = collector(jobConf, taskCtx0, reduce || !job.info().hasReducer(), fileName(), taskCtx0.attemptId());
if (info.type() == MAP && job.info().hasCombiner()) {