@Override
public void abortTask(TaskAttemptContext taskContext) throws IOException {
  tof.abort(taskContext.getTaskAttemptID());
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
  return tof.hasModifications(taskContext.getTaskAttemptID());
}
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
  tof.commit(taskContext.getTaskAttemptID());
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
  return this.attemptIdToMultiTaskAttempt.containsKey(taskContext.getTaskAttemptID().toString());
}
private String generateKey(TaskAttemptContext context) throws IOException {
  String jobInfoString = context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
  if (StringUtils.isBlank(jobInfoString)) {
    // Fail fast with a descriptive IOException instead of an NPE during deserialization.
    throw new IOException("Could not retrieve OutputJobInfo for TaskAttempt "
        + context.getTaskAttemptID());
  }
  OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(jobInfoString);
  return context.getTaskAttemptID().toString() + "@" + jobInfo.getLocation();
}
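// Hedged illustration of the key shape generateKey() returns above:
// "<taskAttemptId>@<outputLocation>". Both values below are made up for the example.
String attemptId = "attempt_200707121733_0001_m_000000_0"; // hypothetical attempt id
String location = "hdfs://namenode:8020/warehouse/mytable"; // hypothetical table location
String key = attemptId + "@" + location;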
static synchronized String getOutputName(TaskAttemptContext context) {
  return context.getConfiguration().get("mapreduce.output.basename", "part") + "-"
      + NUMBER_FORMAT.format(context.getTaskAttemptID().getTaskID().getId());
}
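// Hedged sketch of the NUMBER_FORMAT referenced above, assuming it follows the
// usual Hadoop FileOutputFormat convention: zero-padded five-digit task ids with
// grouping disabled, so task id 7 formats as "part-00007".
// (Requires java.text.NumberFormat.)
private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
static {
  NUMBER_FORMAT.setMinimumIntegerDigits(5);
  NUMBER_FORMAT.setGroupingUsed(false);
}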
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
  String taskAttemptId = taskContext.getTaskAttemptID().toString();
  LOG.info("Committing task attempt: " + taskAttemptId);
  this.attemptIdToMultiTaskAttempt.get(taskAttemptId).commit();
}
public static TaskAttemptContext createTaskAttemptContext(
    org.apache.hadoop.mapreduce.TaskAttemptContext context) {
  return createTaskAttemptContext(new JobConf(context.getConfiguration()),
      org.apache.hadoop.mapred.TaskAttemptID.forName(context.getTaskAttemptID().toString()),
      Reporter.NULL);
}
/**
 * Get the TaskAttemptContext with the related OutputFormat configuration populated,
 * given the alias and the actual TaskAttemptContext.
 * @param alias the name given to the OutputFormat configuration
 * @param context the Mapper or Reducer Context
 * @return a copy of the TaskAttemptContext with the alias configuration populated
 */
public static TaskAttemptContext getTaskAttemptContext(String alias, TaskAttemptContext context) {
  String aliasConf = context.getConfiguration().get(getAliasConfName(alias));
  TaskAttemptContext aliasContext = ShimLoader.getHadoopShims().getHCatShim()
      .createTaskAttemptContext(context.getConfiguration(), context.getTaskAttemptID());
  addToConfig(aliasConf, aliasContext.getConfiguration());
  return aliasContext;
}
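// Hedged usage sketch for the helper above, as it might be called from inside a
// Mapper or Reducer. "textOutput" is a hypothetical alias registered with the
// MultiOutputFormat-style class this method is assumed to live on.
TaskAttemptContext aliasContext = getTaskAttemptContext("textOutput", context);
Configuration aliasConf = aliasContext.getConfiguration(); // alias-specific settings now visible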
final Path taskAttemptDir = getTaskPath(
    context.getJobID(), context.getTaskAttemptID(), taskContext.getWorkingDirectory());
@Override
public RecordWriter<NullWritable, VertexWritable> getRecordWriter(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  synchronized (this) {
    if (null == graph) {
      Configuration hadoopConf = taskAttemptContext.getConfiguration();
      ModifiableHadoopConfiguration mhc =
          ModifiableHadoopConfiguration.of(TitanHadoopConfiguration.MAPRED_NS, hadoopConf);
      graph = (StandardTitanGraph) TitanFactory.open(mhc.getTitanGraphConf());
    }
  }
  // Special case for a TP3 vertex program: persist only those properties whose keys are
  // returned by VertexProgram.getElementComputeKeys()
  if (null == persistableKeys) {
    try {
      persistableKeys = VertexProgram.createVertexProgram(graph,
          ConfUtil.makeApacheConfiguration(taskAttemptContext.getConfiguration()))
          .getElementComputeKeys();
      log.debug("Set persistableKeys={}", Joiner.on(",").join(persistableKeys));
    } catch (Exception e) {
      log.debug("Unable to detect or instantiate vertex program", e);
      persistableKeys = ImmutableSet.of();
    }
  }
  StandardTitanTx tx = transactions.computeIfAbsent(taskAttemptContext.getTaskAttemptID(),
      id -> (StandardTitanTx) graph.newTransaction());
  return new TitanH1RecordWriter(taskAttemptContext, tx, persistableKeys);
}
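// Hedged sketch of the `transactions` cache assumed by the computeIfAbsent()
// call above: a concurrent map keyed by TaskAttemptID, so each attempt gets
// exactly one transaction and concurrent getRecordWriter() calls never
// double-open one. (Requires java.util.concurrent; the field name and
// declaration are assumptions, not taken from the original class.)
private final ConcurrentMap<TaskAttemptID, StandardTitanTx> transactions =
    new ConcurrentHashMap<>();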
private void mockTaskAttemptContext(String indexType) {
  TaskAttemptID fakeTaskId =
      new TaskAttemptID(new TaskID("foo_task_" + indexType, 123, TaskType.REDUCE, 2), 2);
  when(fakeTaskAttemptContext.getTaskAttemptID()).thenReturn(fakeTaskId);
  when(fakeTaskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
}
@Override
public RecordReader<LongWritable, LongWritable> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException, InterruptedException {
  int taskId = context.getTaskAttemptID().getTaskID().getId();
  int numMapTasks = context.getConfiguration().getInt(NUM_MAPS_KEY, NUM_MAPS);
  int numIterations = context.getConfiguration().getInt(NUM_IMPORT_ROUNDS_KEY, NUM_IMPORT_ROUNDS);
  int iteration = context.getConfiguration().getInt(ROUND_NUM_KEY, 0);
  taskId = taskId + iteration * numMapTasks;
  numMapTasks = numMapTasks * numIterations;
  // Mask the sign bit rather than calling Math.abs(), which returns a negative
  // value for Long.MIN_VALUE.
  long chainId = new Random().nextLong() & Long.MAX_VALUE;
  // Round down to a multiple of numMapTasks, then add taskId, so that
  // chainId % numMapTasks == taskId: unique per task and across iterations.
  chainId = chainId - (chainId % numMapTasks) + taskId;
  LongWritable[] keys = new LongWritable[] {new LongWritable(chainId)};
  return new FixedRecordReader<>(keys, keys);
}
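// Hedged worked example of the chainId arithmetic above (all values made up):
// rounding the random draw down to a multiple of numMapTasks and adding taskId
// forces chainId % numMapTasks == taskId, so ids cannot collide across tasks.
long draw = 9L, tasks = 4L, task = 3L;
long id = draw - (draw % tasks) + task; // 9 - 1 + 3 = 11
assert id % tasks == task;              // 11 % 4 == 3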
/**
 * Write random values to the writer, assuming a table created using
 * {@link #FAMILIES} as column family descriptors.
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte[] valBytes = new byte[valLength];
  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";
  final byte[] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {
    Bytes.putInt(keyBytes, 0, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
    for (byte[] family : families) {
      Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
expect(context.getConfiguration()).andReturn(job.getConfiguration()).anyTimes();
expect(context.getTaskAttemptID())
    .andReturn(TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0"))
    .anyTimes();
private void close(final StoreFileWriter w) throws IOException {
  if (w != null) {
    w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis()));
    w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
    w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
    w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
    w.appendTrackedTimestampsToMetadata();
    w.close();
  }
}
when(context.getTaskAttemptID()).thenReturn(tid0);
when(context.getConfiguration()).thenReturn(conf);
static TaskAttemptContext getNamedTaskContext(TaskAttemptContext context, String namedOutput)
    throws IOException {
  Job job = getNamedJob(context, namedOutput);
  return new TaskAttemptContextImpl(job.getConfiguration(), context.getTaskAttemptID(),
      new WrappedStatusReporter(context));
}
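// Hedged usage sketch: context plumbing like getNamedTaskContext() above backs
// MultipleOutputs-style classes, where client code writes through a per-name
// context. "errors" is a hypothetical named output registered at job-setup time,
// and `mos` is an assumed MultipleOutputs instance.
mos.write("errors", key, value);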
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context)
    throws IOException, InterruptedException {
  CrunchInputSplit crunchSplit = (CrunchInputSplit) inputSplit;
  InputSplit delegateSplit = crunchSplit.getInputSplit();
  delegate.initialize(delegateSplit,
      TaskAttemptContextFactory.create(crunchSplit.getConf(), context.getTaskAttemptID()));
}
public CrunchRecordReader(InputSplit inputSplit, final TaskAttemptContext context)
    throws IOException, InterruptedException {
  CrunchInputSplit crunchSplit = (CrunchInputSplit) inputSplit;
  InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils.newInstance(
      crunchSplit.getInputFormatClass(), crunchSplit.getConf());
  this.delegate = inputFormat.createRecordReader(crunchSplit.getInputSplit(),
      TaskAttemptContextFactory.create(crunchSplit.getConf(), context.getTaskAttemptID()));
}