/**
 * @param ctx Task context.
 * @throws IgniteCheckedException If failed.
 */
protected AdderBase(HadoopTaskContext ctx) throws IgniteCheckedException {
    valSer = ctx.valueSerialization();
    keySer = ctx.keySerialization();

    out = new HadoopDataOutStream(mem) {
        @Override public long move(long size) {
            long ptr = super.move(size);

            if (ptr == 0) // Was not able to move - not enough free space.
                ptr = allocateNextPage(size);

            assert ptr != 0;

            return ptr;
        }
    };
}
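// A minimal sketch of how the serializations captured above are typically consumed by a
// concrete adder: serialize through the page-backed stream and measure the bytes written.
// buffer().pointer() is an assumed accessor on HadoopDataOutStream for the current write
// position; only HadoopSerialization.write(DataOutput, Object) is taken as given.
protected long write(Object obj, HadoopSerialization ser) throws IgniteCheckedException {
    long start = out.buffer().pointer(); // Assumed: absolute position before the write.

    ser.write(out, obj); // HadoopDataOutStream acts as the DataOutput.

    return out.buffer().pointer() - start; // Bytes this object occupied.
}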
/**
 * Cancels the task being executed.
 */
public void cancel() {
    cancelled = true;

    if (ctx != null)
        ctx.cancel();
}
/** {@inheritDoc} */
@Override public Counters.Counter getCounter(String grp, String name) {
    return new HadoopV1Counter(ctx.counter(grp, name, HadoopLongCounter.class));
}
/**
 * @param perfCntr Performance counter.
 * @throws IgniteCheckedException If failed.
 */
private void runTask(HadoopPerformanceCounter perfCntr) throws IgniteCheckedException {
    if (cancelled)
        throw new HadoopTaskCancelledException("Task cancelled.");

    try (HadoopTaskOutput out = createOutputInternal(ctx);
         HadoopTaskInput in = createInputInternal(ctx)) {
        ctx.input(in);
        ctx.output(out);

        perfCntr.onTaskStart(ctx.taskInfo(), U.currentTimeMillis());

        ctx.run();
    }
}
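// A hedged usage sketch: how a caller might drive runTask() and fold cancellation into a
// final task status (compare the HadoopTaskStatus construction in the lifecycle fragment
// below). The HadoopTaskState values and the local variables are assumptions for
// illustration; runTask() and HadoopTaskCancelledException come from the fragment above.
HadoopTaskState state = HadoopTaskState.COMPLETED;
Throwable err = null;

try {
    runTask(perfCntr);
}
catch (HadoopTaskCancelledException ignored) {
    state = HadoopTaskState.CANCELED;
}
catch (Throwable e) {
    state = HadoopTaskState.FAILED;

    err = e;
}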
HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(taskCtx.counters(), null);

HadoopSerialization keySer = taskCtx.keySerialization();
HadoopSerialization valSer = taskCtx.valueSerialization();
perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId);

ctx.prepareTaskEnvironment();

if (ctx.taskInfo().hasMapperIndex())
    combineTaskInfo.mapperIndex(ctx.taskInfo().mapperIndex());

ctx.taskInfo(combineTaskInfo);

ctx.taskInfo(info);

perfCntr.onTaskFinish(info, execEndTs);

onTaskFinished(new HadoopTaskStatus(state, err, ctx == null ? null : ctx.counters()));

ctx.cleanupTaskEnvironment();
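// The fragments above imply a fixed per-attempt ordering: resolve the performance counter,
// prepare the task environment, run, record the finish, clean up. A minimal sketch of that
// lifecycle using only methods already shown; the try/finally framing is an assumption that
// keeps cleanup running even when ctx.run() throws.
perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId);

ctx.prepareTaskEnvironment();

try {
    perfCntr.onTaskStart(ctx.taskInfo(), U.currentTimeMillis());

    ctx.run();
}
finally {
    perfCntr.onTaskFinish(ctx.taskInfo(), U.currentTimeMillis());

    ctx.cleanupTaskEnvironment();
}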
HadoopJobEx job = taskCtx.job();

if (!reduce && taskCtx.taskInfo().hasMapperIndex())
    HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
else
    HadoopMapperUtils.clearMapperIndex();

JobConf jobConf = taskCtx0.jobConf();

HadoopTaskInput input = taskCtx.input();

taskCtx.onMapperFinished();
HadoopJobEx job = taskCtx.job();

if (taskCtx.taskInfo().hasMapperIndex())
    HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
else
    HadoopMapperUtils.clearMapperIndex();

taskCtx.onMapperFinished();
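// Both fragments guard HadoopMapperUtils the same way, which points at a thread-local
// mapper index that must never leak between task attempts on a pooled thread. A hedged
// sketch of what such a utility could look like (the actual Ignite implementation may
// differ; only mapperIndex(Integer)/clearMapperIndex() are taken from the fragments):
public class HadoopMapperUtils {
    /** Index of the mapper running on the current thread, if any. */
    private static final ThreadLocal<Integer> MAP_IDX = new ThreadLocal<>();

    /** @return Current mapper index, or -1 when unset. */
    public static int mapperIndex() {
        Integer idx = MAP_IDX.get();

        return idx != null ? idx : -1;
    }

    /** @param idx Mapper index to bind to the current thread. */
    public static void mapperIndex(Integer idx) {
        MAP_IDX.set(idx);
    }

    /** Clears the index so a pooled thread cannot observe a stale value. */
    public static void clearMapperIndex() {
        MAP_IDX.remove();
    }
}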
/** {@inheritDoc} */
@Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    Input in = new Input(taskCtx);

    Comparator<Object> grpCmp = taskCtx.groupComparator();

    if (grpCmp != null)
        return new GroupedInput(grpCmp, in);

    return in;
}
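// A consumption sketch for the grouping decision above: when a group comparator is set,
// consecutive keys that compare as equal are exposed as one key with a merged value
// iterator (Hadoop's secondary-sort contract); without it every key stands alone. The
// next()/key()/values() calls are the HadoopTaskInput methods used elsewhere in this
// section; process() is a hypothetical consumer.
HadoopTaskInput in = input(taskCtx);

while (in.next()) {
    Object key = in.key();

    Iterator<?> vals = in.values();

    while (vals.hasNext())
        process(key, vals.next());
}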
HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(taskCtx.counters(), null);
/**
 * @param taskCtx Task context.
 * @throws IgniteCheckedException If failed.
 */
private Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    keyReader = new Reader(taskCtx.keySerialization());
    valReader = new Reader(taskCtx.valueSerialization());
}
/** {@inheritDoc} */
@Override public Counter getCounter(String grpName, String cntrName) {
    return new HadoopV2Counter(ctx.counter(grpName, cntrName, HadoopLongCounter.class));
}
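// Both adapters above resolve the same Ignite-managed HadoopLongCounter and only adapt it
// to the Hadoop v1 (Counters.Counter) or v2 (Counter) interface. A hedged sketch of the
// delegation inside such a wrapper, assuming increment(long)/value() accessors on
// HadoopLongCounter:
/** {@inheritDoc} */
@Override public void increment(long incr) {
    cntr.increment(incr); // Both API generations mutate the one shared counter.
}

/** {@inheritDoc} */
@Override public long getValue() {
    return cntr.value();
}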
/**
 * Constructor.
 *
 * @param flushSize Flush size.
 * @param gzip Whether to perform GZIP.
 * @param taskCtx Task context.
 * @throws IgniteCheckedException If failed.
 */
public HadoopDirectDataOutputContext(int flushSize, boolean gzip, HadoopTaskContext taskCtx)
    throws IgniteCheckedException {
    this.flushSize = flushSize;
    this.gzip = gzip;

    keySer = taskCtx.keySerialization();
    valSer = taskCtx.valueSerialization();

    out = new HadoopDirectDataOutput(flushSize);

    if (gzip)
        gzipOut = new HadoopDirectDataOutput(Math.max(flushSize / 8, GZIP_OUT_MIN_ALLOC_SIZE));
}
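// A sketch of the write path this constructor prepares: each pair is serialized into the
// direct buffer, and the caller flushes once flushSize is crossed; with gzip enabled the
// flushed bytes would be compressed into gzipOut. position() is an assumed accessor on
// HadoopDirectDataOutput for the bytes written so far.
public boolean write(Object key, Object val) throws IgniteCheckedException {
    keySer.write(out, key);
    valSer.write(out, val);

    return out.position() >= flushSize; // True signals the caller to flush the buffer.
}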
/**
 * @param taskCtx Task context.
 * @throws IgniteCheckedException If failed.
 */
public Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    cap = capacity();

    keyReader = new Reader(taskCtx.keySerialization());
    valReader = new Reader(taskCtx.valueSerialization());
}
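// A hedged sketch of what the Reader wrapper around a HadoopSerialization could look like:
// it re-reads into a single reusable object, which is why one Reader per side (key/value)
// is created in the constructors above. The internal structure is guessed; only
// HadoopSerialization.read(DataInput, Object) is taken from the surrounding fragments.
private class Reader {
    /** Serialization to deserialize with. */
    private final HadoopSerialization ser;

    /** Reusable target object (null on first read). */
    private Object obj;

    /** @param ser Serialization. */
    Reader(HadoopSerialization ser) {
        this.ser = ser;
    }

    /** @param in Stream positioned at the next serialized object. */
    Object read(DataInput in) throws IgniteCheckedException {
        return obj = ser.read(in, obj); // Reuse the previous instance where the serialization allows.
    }
}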
HadoopSerialization ser = taskCtx.keySerialization();

ser = taskCtx.valueSerialization();