/** {@inheritDoc} */
@Override public String user() {
    return info().user();
}
}
/** {@inheritDoc} */
@Override public Void call() throws IgniteCheckedException {
    ctx = job.getTaskContext(info);

    return ctx.runAsJobOwner(new Callable<Void>() {
        @Override public Void call() throws Exception {
            call0();

            return null;
        }
    });
}
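// Hedged sketch, not necessarily how runAsJobOwner() is implemented here: the standard way
// to run a body under the submitting user's Hadoop security context is UserGroupInformation
// (a real Hadoop API: org.apache.hadoop.security.UserGroupInformation plus
// java.security.PrivilegedExceptionAction). The enclosing code would need to handle the
// IOException/InterruptedException that doAs() declares.
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(ctx.user());

ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override public Void run() throws Exception {
        call0();

        return null;
    }
});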
/**
 * @param igniteInstanceName Ignite instance name.
 * @param io IO Closure for sending messages.
 */
@SuppressWarnings("BusyWait")
public void startSending(String igniteInstanceName, IgniteInClosure2X<T, HadoopMessage> io) {
    assert snd == null;
    assert io != null;

    this.io = io;

    if (!stripeMappers) {
        if (!flushed) {
            // Background sender: periodically collects buffered shuffle updates and sends them,
            // sleeping between rounds when a throttle interval is configured.
            snd = new GridWorker(igniteInstanceName, "hadoop-shuffle-" + job.id(), log) {
                @Override protected void body() throws InterruptedException {
                    try {
                        while (!isCancelled()) {
                            if (throttle > 0)
                                Thread.sleep(throttle);

                            collectUpdatesAndSend(false);
                        }
                    }
                    catch (IgniteCheckedException e) {
                        throw new IllegalStateException(e);
                    }
                }
            };

            new IgniteThread(snd).start();
        }
    }

    ioInitLatch.countDown();
}
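// Hedged usage sketch: wiring the IO closure into startSending(). The shuffleJob field,
// the UUID destination type and the send(...) transport helper are illustrative
// assumptions, not taken from the fragment above.
shuffleJob.startSending("grid-0", new IgniteInClosure2X<UUID, HadoopMessage>() {
    @Override public void applyx(UUID destNodeId, HadoopMessage msg) throws IgniteCheckedException {
        send(destNodeId, msg); // Hypothetical transport call.
    }
});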
job.cleanupStagingDirectory();

String statWriterClsName = job.info().property(HadoopCommonUtils.JOB_COUNTER_WRITER_PROPERTY);

job.dispose(false);
/** {@inheritDoc} */
@Override public void prepareTaskEnvironment() throws IgniteCheckedException {
    File locDir;

    switch (taskInfo().type()) {
        case MAP:
        case REDUCE:
            job().prepareTaskEnvironment(taskInfo());

            locDir = taskLocalDir(job.igniteWorkDirectory(), locNodeId, taskInfo());

            break;

        default:
            locDir = jobLocalDir(job.igniteWorkDirectory(), locNodeId, taskInfo().jobId());
    }

    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(jobConf().getClassLoader());

    try {
        // Initialize the default file system for this configuration (result intentionally unused).
        FileSystem.get(jobConf());

        LocalFileSystem locFs = FileSystem.getLocal(jobConf());

        // Point the local file system at the task-local working directory.
        locFs.setWorkingDirectory(new Path(locDir.getAbsolutePath()));
    }
    catch (Throwable e) {
        if (e instanceof Error)
            throw (Error)e;

        throw transformException(e);
    }
    finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}
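// Hedged lifecycle sketch: prepareTaskEnvironment() and cleanupTaskEnvironment() are
// assumed to bracket the actual task body. run() is an assumption about the task context
// API, not taken from these fragments; taskCtx is a HadoopTaskContext as obtained elsewhere.
taskCtx.prepareTaskEnvironment();

try {
    taskCtx.run();
}
finally {
    taskCtx.cleanupTaskEnvironment();
}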
ctx.kernalContext().hadoopHelper());

job.initialize(false, ctx.localNodeId());

job.dispose(false);
/**
 * Stops all executors and running tasks.
 */
private void shutdown() {
    // Stop the task executor, waiting up to 5000 ms for running tasks.
    if (execSvc != null)
        execSvc.shutdown(5000);

    if (msgExecSvc != null)
        msgExecSvc.shutdownNow();

    try {
        job.dispose(true);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to dispose job.", e);
    }
}
/** {@inheritDoc} */
@Override public void cleanupTaskEnvironment() throws IgniteCheckedException {
    job().cleanupTaskEnvironment(taskInfo());
}
job.initialize(true, nodeDesc.processId());
/**
 * @param taskInfo Task info.
 * @param gridJob Grid Hadoop job.
 * @throws IgniteCheckedException If task context initialization failed.
 */
public HadoopTestTaskContext(HadoopTaskInfo taskInfo, HadoopJobEx gridJob) throws IgniteCheckedException {
    super(taskInfo, gridJob, gridJob.id(), null, jobConfDataInput(gridJob));
}
/** {@inheritDoc} */
@Nullable @Override public String property(String name) {
    return info().property(name);
}
this.embedded = embedded;

boolean stripeMappers0 = get(job.info(), SHUFFLE_MAPPER_STRIPED_OUTPUT, true);

// Striped mapper output requires embedded mode: disable it (with a notice) otherwise.
if (stripeMappers0 && !embedded) {
    if (log.isInfoEnabled())
        log.info("Striped mapper output is disabled because it cannot be used in external mode [jobId=" +
            job.id() + ']');

    stripeMappers0 = false;
}

msgSize = get(job.info(), SHUFFLE_MSG_SIZE, DFLT_SHUFFLE_MSG_SIZE);
msgGzip = get(job.info(), SHUFFLE_MSG_GZIP, DFLT_SHUFFLE_MSG_GZIP);

HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, job.id(), rdc, 0, null);

msgs = new HadoopShuffleMessage[rmtMapsSize];

throttle = get(job.info(), SHUFFLE_JOB_THROTTLE, 0);
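// Hedged configuration sketch: how a client might tune the shuffle properties read above
// via org.apache.hadoop.conf.Configuration. Assumes the HadoopJobProperty enum exposes the
// raw key string via propertyName(); the concrete values are examples only.
Configuration conf = new Configuration();

conf.setInt(HadoopJobProperty.SHUFFLE_MSG_SIZE.propertyName(), 1024 * 1024); // 1 MB shuffle messages.
conf.setBoolean(HadoopJobProperty.SHUFFLE_MSG_GZIP.propertyName(), true);    // Gzip shuffle traffic.
conf.setInt(HadoopJobProperty.SHUFFLE_JOB_THROTTLE.propertyName(), 100);     // 100 ms between sends.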
/**
 * Creates reducer tasks based on job information.
 *
 * @param reducers Reducers (may be {@code null}).
 * @param job Job instance.
 * @return Collection of task infos.
 */
private Collection<HadoopTaskInfo> reducerTasks(int[] reducers, HadoopJobEx job) {
    UUID locNodeId = ctx.localNodeId();
    HadoopJobId jobId = job.id();

    JobLocalState state = activeJobs.get(jobId);

    Collection<HadoopTaskInfo> tasks = null;

    if (reducers != null) {
        if (state == null)
            state = initState(job.id());

        for (int rdc : reducers) {
            if (state.addReducer(rdc)) {
                if (log.isDebugEnabled())
                    log.debug("Submitting REDUCE task for execution [locNodeId=" + locNodeId +
                        ", rdc=" + rdc + ']');

                HadoopTaskInfo taskInfo = new HadoopTaskInfo(REDUCE, jobId, rdc, 0, null);

                if (tasks == null)
                    tasks = new ArrayList<>();

                tasks.add(taskInfo);
            }
        }
    }

    return tasks;
}
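// Hedged usage sketch: handing the collected reducer tasks to the task executor. The
// plan variable and the ctx.taskExecutor() accessor are assumptions about the surrounding
// job tracker code, not taken from the method above.
Collection<HadoopTaskInfo> tasks = reducerTasks(plan.reducers(ctx.localNodeId()), job);

if (tasks != null)
    ctx.taskExecutor().run(job, tasks);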
HadoopTaskContext taskCtx = job.getTaskContext(new HadoopTaskInfo(HadoopTaskType.MAP, null, 0, 0, null));
/** {@inheritDoc} */
@Override public boolean hasReducer() {
    return info().hasReducer();
}
/**
 * Sends prepare request to remote process.
 *
 * @param proc Process to send request to.
 * @param job Job.
 * @param plan Map reduce plan.
 */
private void prepareForJob(HadoopProcess proc, HadoopJobEx job, HadoopMapReducePlan plan) {
    try {
        comm.sendMessage(proc.descriptor(), new HadoopPrepareForJobRequest(job.id(), job.info(),
            plan.reducers(), plan.reducers(ctx.localNodeId())));
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send job prepare request to remote process [proc=" + proc + ", job=" + job +
            ", plan=" + plan + ']', e);

        proc.terminate();
    }
}
String outFldr = jobWorkFolder(job.id()) + File.separator + childProcId;
/** {@inheritDoc} */
@Override public boolean hasCombiner() {
    return info().hasCombiner();
}
/**
 * Sends execution request to remote node.
 *
 * @param proc Process to send request to.
 * @param job Job instance.
 * @param tasks Collection of tasks to execute in started process.
 */
private void sendExecutionRequest(HadoopProcess proc, HadoopJobEx job, Collection<HadoopTaskInfo> tasks)
    throws IgniteCheckedException {
    // Must synchronize since concurrent process crash may happen and will receive onConnectionLost().
    proc.lock();

    try {
        if (proc.terminated()) {
            notifyTasksFailed(tasks, CRASHED, null);

            return;
        }

        HadoopTaskExecutionRequest req = new HadoopTaskExecutionRequest();

        req.jobId(job.id());
        req.jobInfo(job.info());
        req.tasks(tasks);

        comm.sendMessage(proc.descriptor(), req);
    }
    finally {
        proc.unlock();
    }
}
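// Hedged flow sketch: the two requests above are assumed to form a prepare-then-execute
// handshake per child process; the single REDUCE task below is an example payload built
// with the HadoopTaskInfo constructor used elsewhere in these fragments.
prepareForJob(proc, job, plan);

Collection<HadoopTaskInfo> tasks = new ArrayList<>();

tasks.add(new HadoopTaskInfo(REDUCE, job.id(), 0, 0, null));

sendExecutionRequest(proc, job, tasks);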