@Override
public TaskAttemptContext newTask(Configuration c, TaskAttemptID t) {
  return new TaskAttemptContext(c, t);
}
/**
 * Create a TaskAttemptContext, supporting many Hadoops.
 *
 * @param conf Configuration
 * @param taskAttemptID TaskAttemptID to use
 * @return TaskAttemptContext
 */
public static TaskAttemptContext makeTaskAttemptContext(Configuration conf,
                                                        TaskAttemptID taskAttemptID) {
  TaskAttemptContext context;
  context = new TaskAttemptContext(conf, taskAttemptID);
  return context;
}
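The helper above still invokes the Hadoop 1.x constructor directly, which does not compile against Hadoop 2.x, where TaskAttemptContext became an interface. Below is a minimal reflective sketch of the usual cross-version shim, not taken from the original source: the method name makeTaskAttemptContextCompat is hypothetical, and it assumes the concrete Hadoop 2.x class org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl is on the classpath when running against Hadoop 2.

import java.lang.reflect.Constructor;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;

/** Sketch of a cross-version factory; makeTaskAttemptContextCompat is a hypothetical name. */
public static TaskAttemptContext makeTaskAttemptContextCompat(Configuration conf,
                                                              TaskAttemptID taskAttemptID) {
  String implName;
  try {
    // Hadoop 2.x: TaskAttemptContext is an interface; the constructible class lives here.
    Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
    implName = "org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl";
  } catch (ClassNotFoundException e) {
    // Hadoop 1.x: TaskAttemptContext itself is a concrete class with this constructor.
    implName = "org.apache.hadoop.mapreduce.TaskAttemptContext";
  }
  try {
    Constructor<?> ctor = Class.forName(implName)
        .getConstructor(Configuration.class, TaskAttemptID.class);
    return (TaskAttemptContext) ctor.newInstance(conf, taskAttemptID);
  } catch (ReflectiveOperationException e) {
    throw new IllegalStateException("Cannot construct a TaskAttemptContext", e);
  }
}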
@Override
public TaskAttemptContext createTaskAttemptContext(Configuration conf, TaskAttemptID taskId) {
  return new TaskAttemptContext(conf, taskId);
}
/**
 * Constructor
 *
 * @param inputFormat Hive table InputFormat
 * @param conf Configuration
 * @param splits input splits
 */
public RecordIterator(HiveApiInputFormat inputFormat, Configuration conf,
                      Iterator<InputSplit> splits) {
  this.inputFormat = inputFormat;
  this.splits = splits;
  this.taskContext = new TaskAttemptContext(conf, new TaskAttemptID());
}
public static void main(String[] args) throws IOException, InterruptedException {
  Configuration conf = new Configuration(true);
  conf.set(S3_BUCKET_NAME, "hari_dev");
  conf.set(S3_KEY_PREFIX, "users/299/avatar_f");
  conf.setInt(S3_NUM_OF_KEYS_PER_MAPPER, 1);

  S3InputFormat<Text, S3ObjectSummaryWritable> s3FileInput = new S3ObjectSummaryInputFormat();
  List<InputSplit> splits = s3FileInput.getSplits(new JobContext(conf, new JobID()));

  S3ObjectSummaryRecordReader reader = new S3ObjectSummaryRecordReader();
  for (InputSplit inputSplit : splits) {
    System.out.println(inputSplit.toString());
    reader.initialize(inputSplit, new TaskAttemptContext(conf, new TaskAttemptID()));
    int i = 0;
    while (reader.nextKeyValue()) {
      System.out.printf("\t%d Key=%s. Value=%s\n", i++, reader.getCurrentKey(), reader.getCurrentValue());
    }
  }
}
@SuppressWarnings("unchecked") static <K,V> CombinerRunner<K,V> create(JobConf job, TaskAttemptID taskId, Counters.Counter inputCounter, TaskReporter reporter, org.apache.hadoop.mapreduce.OutputCommitter committer ) throws ClassNotFoundException { Class<? extends Reducer<K,V,K,V>> cls = (Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass(); if (cls != null) { return new OldCombinerRunner(cls, job, inputCounter, reporter); } // make a task context so we can get the classes org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.TaskAttemptContext(job, taskId); Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls = (Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>) taskContext.getCombinerClass(); if (newcls != null) { return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext, inputCounter, reporter, committer); } return null; } }
private void runCDXTest(Configuration conf, String expected) throws Exception {
  File testFile = new File("src/test/resources/rr-test-inputs.txt");
  Path path = new Path(testFile.getAbsoluteFile().toURI().toString());
  FileSplit split = new FileSplit(path, 0, testFile.length(), null);

  ArchiveToCDXFileInputFormat inputFormat =
      ReflectionUtils.newInstance(ArchiveToCDXFileInputFormat.class, conf);
  TaskAttemptContext context = new TaskAttemptContext(conf, new TaskAttemptID());
  RecordReader<Text, Text> reader = inputFormat.createRecordReader(split, context);
  reader.initialize(split, context);

  int position = 0;
  String value = "";
  while (reader.nextKeyValue()) {
    position += 1;
    if (position == 3)
      value = reader.getCurrentValue().toString();
  }
  // Check the third value is as expected
  log.debug(value);
  Assert.assertEquals(expected, value);
}
new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID());
InputSplit split = splits.get(i);
TaskAttemptID taskID = new TaskAttemptID();
TaskAttemptContext taskContext = new TaskAttemptContext(hiveConf, taskID);
if (i % args.splitPrintPeriod == 0) {
  System.err.println("Handling split " + i + " of " + splits.size());
TaskAttemptContext taskContext = new TaskAttemptContext(context.hiveConf, taskId);
RecordReader<WritableComparable, HiveReadableRecord> recordReader;
recordReader = context.hiveApiInputFormat.createRecordReader(split, taskContext);