@SuppressWarnings("deprecation") private TaskAttemptContext getContext(String nameOutput) throws IOException { TaskAttemptContext taskContext = taskContexts.get(nameOutput); if (taskContext != null) { return taskContext; } // The following trick leverages the instantiation of a record writer via // the job thus supporting arbitrary output formats. Job job = new Job(context.getConfiguration()); job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput)); Schema keySchema=null,valSchema=null; if (job.getConfiguration().get(MO_PREFIX + nameOutput + ".keyschema",null) != null) keySchema = Schema.parse(job.getConfiguration().get( MO_PREFIX + nameOutput + ".keyschema")); if (job.getConfiguration().get(MO_PREFIX + nameOutput + ".valueschema", null) != null) valSchema = Schema.parse(job.getConfiguration().get( MO_PREFIX + nameOutput + ".valueschema")); setSchema(job, keySchema, valSchema); taskContext = createTaskAttemptContext( job.getConfiguration(), context.getTaskAttemptID()); taskContexts.put(nameOutput, taskContext); return taskContext; }
/**
 * Write key value to an output file name.
 *
 * Gets the record writer from job's output format. Job's output format should
 * be a FileOutputFormat.
 *
 * @param key the key
 * @param value the value
 * @param keySchema keySchema to use
 * @param valSchema ValueSchema to use
 * @param baseOutputPath base-output path to write the record to. Note: Framework will
 *                       generate unique filename for the baseOutputPath
 * @throws IOException if the record writer cannot be obtained or the write fails
 * @throws InterruptedException if the write is interrupted
 */
@SuppressWarnings("unchecked")
public void write(Object key, Object value, Schema keySchema, Schema valSchema,
    String baseOutputPath) throws IOException, InterruptedException {
  checkBaseOutputPath(baseOutputPath);
  // Job.getInstance(Configuration) replaces the deprecated
  // new Job(Configuration) constructor used previously.
  Job job = Job.getInstance(context.getConfiguration());
  setSchema(job, keySchema, valSchema);
  TaskAttemptContext taskContext =
      createTaskAttemptContext(job.getConfiguration(), context.getTaskAttemptID());
  getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/**
 * Returns the task attempt ID of the wrapped context, or {@code null} when no
 * context has been set.
 */
protected TaskAttemptID getTaskAttemptID() {
  return context == null ? null : context.getTaskAttemptID();
}
/** {@inheritDoc} Delegates to the wrapped context. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** {@inheritDoc} Forwards the call to the underlying context. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** {@inheritDoc} Simple pass-through to the delegate context. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** {@inheritDoc} Answered by the wrapped base context. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** {@inheritDoc} Delegation only; no local state involved. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** {@inheritDoc} Obtained from the wrapped context. */
@Override
public TaskAttemptID getTaskAttemptID() {
  return this.base.getTaskAttemptID();
}
/** Returns the task attempt ID of the underlying context. */
protected TaskAttemptID getTaskAttemptID() {
  return this.context.getTaskAttemptID();
}
/**
 * Returns the task attempt ID of the wrapped context, falling back to a
 * freshly constructed (default) {@link TaskAttemptID} when no context is set.
 */
protected TaskAttemptID getTaskAttemptID() {
  return context != null ? context.getTaskAttemptID() : new TaskAttemptID();
}
/**
 * Builds the list of counters tracked for the given name: the task-type
 * specific output-records counter plus the named counter in COUNTER_GROUP.
 * Returns an empty list (and logs a warning) if counter creation fails.
 */
private List<Counter> getCounters(String name) {
  assert name != null;
  try {
    // Pick the framework output counter that matches this task's type.
    TaskCounter outputCounter =
        context.getTaskAttemptID().getTaskType() == TaskType.MAP
            ? TaskCounter.MAP_OUTPUT_RECORDS
            : TaskCounter.REDUCE_OUTPUT_RECORDS;
    List<Counter> counters = new ArrayList<>();
    counters.add(context.getCounter(outputCounter));
    counters.add(context.getCounter(COUNTER_GROUP, name));
    return counters;
  } catch (RuntimeException e) {
    LOG.warn("Failed to create counters", e);
    return Collections.emptyList();
  }
}
@Override public void initialize() { // create a good random seed, yet ensure deterministic PRNG sequence for easy reproducability long taskId = getContext().getTaskAttemptID().getTaskID().getId(); // taskId = 0, 1, ..., N prng = new Random(421439783L * (taskId + 1)); }
private TaskAttemptContext getContext(String nameOutput) throws IOException { TaskAttemptContext taskContext = taskContexts.get(nameOutput); if (taskContext != null) { return taskContext; } // The following trick leverages the instantiation of a record writer via // the job thus supporting arbitrary output formats. Job job = new Job(context.getConfiguration()); job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput)); job.setOutputKeyClass(getNamedOutputKeyClass(context, nameOutput)); job.setOutputValueClass(getNamedOutputValueClass(context, nameOutput)); taskContext = new TaskAttemptContextImpl(job.getConfiguration(), context .getTaskAttemptID(), new WrappedStatusReporter(context)); taskContexts.put(nameOutput, taskContext); return taskContext; }
private TaskAttemptContext getContext(String nameOutput) throws IOException { TaskAttemptContext taskContext = taskContexts.get(nameOutput); if (taskContext != null) { return taskContext; } // The following trick leverages the instantiation of a record writer via // the job thus supporting arbitrary output formats. Job job = Job.getInstance(context.getConfiguration()); job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput)); job.setOutputKeyClass(getNamedOutputKeyClass(context, nameOutput)); job.setOutputValueClass(getNamedOutputValueClass(context, nameOutput)); taskContext = new TaskAttemptContextImpl(job.getConfiguration(), context .getTaskAttemptID(), new WrappedStatusReporter(context)); taskContexts.put(nameOutput, taskContext); return taskContext; }
/**
 * Maps the task's type to the name of the framework counter that tracks its
 * output records (MAP_OUTPUT_RECORDS for mappers, REDUCE_OUTPUT_RECORDS for
 * reducers).
 *
 * @throws IllegalArgumentException for any other task type
 */
private String getCounterName(TaskInputOutputContext context) {
  MapReduceMetrics.TaskType type =
      MapReduceMetrics.TaskType.from(context.getTaskAttemptID().getTaskType());
  switch (type) {
    case Mapper:
      return TaskCounter.MAP_OUTPUT_RECORDS.name();
    case Reducer:
      return TaskCounter.REDUCE_OUTPUT_RECORDS.name();
    default:
      throw new IllegalArgumentException("Illegal task type: " + type);
  }
}
}
/**
 * Resolves the output-records counter name for this task: mappers report via
 * MAP_OUTPUT_RECORDS, reducers via REDUCE_OUTPUT_RECORDS.
 *
 * @throws IllegalArgumentException if the task type is neither of those
 */
private String getCounterName(TaskInputOutputContext context) {
  MapReduceMetrics.TaskType resolved =
      MapReduceMetrics.TaskType.from(context.getTaskAttemptID().getTaskType());
  switch (resolved) {
    case Mapper:
      return TaskCounter.MAP_OUTPUT_RECORDS.name();
    case Reducer:
      return TaskCounter.REDUCE_OUTPUT_RECORDS.name();
    default:
      throw new IllegalArgumentException("Illegal task type: " + resolved);
  }
}
}
private TaskAttemptContext getContext(String nameOutput) throws IOException { TaskAttemptContext taskContext = taskContexts.get(nameOutput); if (taskContext != null) { return taskContext; } // The following trick leverages the instantiation of a record writer via // the job thus supporting arbitrary output formats. Job job = Job.getInstance(context.getConfiguration()); job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput)); job.setOutputKeyClass(getNamedOutputKeyClass(context, nameOutput)); job.setOutputValueClass(getNamedOutputValueClass(context, nameOutput)); taskContext = new TaskAttemptContextImpl(job.getConfiguration(), context .getTaskAttemptID(), new WrappedStatusReporter(context)); taskContexts.put(nameOutput, taskContext); return taskContext; }
/**
 * <p>
 * This method adds header to the first log file for each of the tasks.
 * </p>
 *
 * @param context
 *          Context supplying the job name, job ID and task attempt ID
 * @param className
 *          calling class
 */
@SuppressWarnings(RAW_TYPES)
public static void addLogHeader(TaskInputOutputContext context, String className) {
  getLogMsg(className, context.getJobName(), INFO, context.getJobID(),
      context.getTaskAttemptID());
}
public MapReducePOStoreImpl(TaskInputOutputContext<?, ?, ?, ?> context) {
  // Work on a private copy of the Configuration so that changes made below
  // (such as setting the output location) do not leak back to the caller.
  Configuration outputConf = new Configuration(context.getConfiguration());

  reporter = PigStatusReporter.getInstance();
  reporter.setContext(new MRTaskContext(context));

  // A single map or reduce task may host multiple stores; give each store
  // its own copy of the context so one store's changes cannot overwrite
  // another's.
  this.context =
      HadoopShims.createTaskAttemptContext(outputConf, context.getTaskAttemptID());
}