@Override
protected void cleanup(Context context) throws IOException {
  final String tmpDirLoc = context.getConfiguration().get(TMP_FILE_LOC_KEY);
  final File tmpDir = Paths.get(tmpDirLoc).toFile();
  FileUtils.deleteDirectory(tmpDir);
  context.progress();
  context.setStatus("Clean");
}
@Override
public void run() {
  try {
    int count = 0;
    while (sendReport) {
      // Periodically report progress on the Context object
      // to prevent the TaskTracker from killing the Templeton
      // Controller task
      context.progress();
      count++;
      String msg = "KeepAlive Heart beat" + makeDots(count);
      LOG.info(msg);
      Thread.sleep(KEEP_ALIVE_MSEC);
    }
  } catch (InterruptedException e) {
    // OK to be interrupted
  }
}
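The fields this run() method relies on come from its enclosing class. Below is a minimal sketch of that surrounding shape, assuming Hadoop's Progressable as the context type; the class layout, the KEEP_ALIVE_MSEC value, and the makeDots() body are assumptions, and only context.progress() is the real framework call.

import org.apache.hadoop.util.Progressable;

// Sketch of the surrounding heartbeat thread; field and method names
// (sendReport, makeDots, KEEP_ALIVE_MSEC) mirror the excerpt, but this
// class shape is an assumption, not the project's actual code.
public class KeepAlive implements Runnable {
  private static final long KEEP_ALIVE_MSEC = 60 * 1000L; // assumed interval
  private final Progressable context;
  // volatile so a stop() from another thread is seen by the heartbeat loop
  private volatile boolean sendReport = true;

  public KeepAlive(Progressable context) {
    this.context = context;
  }

  // Grows a trail of dots so successive log lines are visibly distinct
  private static String makeDots(int count) {
    StringBuilder dots = new StringBuilder();
    for (int i = 0; i <= count % 10; i++) {
      dots.append('.');
    }
    return dots.toString();
  }

  public void stop() {
    sendReport = false;
  }

  @Override
  public void run() {
    // loop body as in the excerpt above
  }
}

A caller would start this with new Thread(keepAlive).start() before kicking off the long-running child work, then call stop() once it completes.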
context.progress();
final Path inPath = new Path(JobHelper.getURIFromSegment(segment));
final File inDir = new File(tmpDir, "in");
context.progress();
final File outDir = new File(tmpDir, "out");
FileUtils.forceMkdir(outDir);
HadoopDruidConverterConfig.INDEX_IO.validateTwoSegments(inDir, outDir);
context.progress();
context.setStatus("Starting PUSH");
final Path baseOutputPath = new Path(config.getSegmentOutputPath());
// ... (elided in the original excerpt: the push call whose trailing argument
//      follows, producing the finalSegment used below)
    config.DATA_SEGMENT_PUSHER
);
context.progress();
context.setStatus("Finished PUSH");
final String finalSegmentString =
    HadoopDruidConverterConfig.jsonMapper.writeValueAsString(finalSegment);
context.progress();
context.setStatus("Ready To Commit");
@Override
public void mapperProgress() {
  // Throttle: only ping the framework if at least 5 minutes have passed
  if (mappercontext != null && System.currentTimeMillis() - lastprogress > 300000) {
    mappercontext.progress();
    lastprogress = System.currentTimeMillis();
  }
}
/**
 * Executes postSave() on worker observers.
 */
private void postSaveOnWorkerObservers() {
  for (WorkerObserver obs : serviceWorker.getWorkerObservers()) {
    obs.postSave();
    context.progress();
  }
}
@Override
public void testFailure(Failure failure) throws Exception {
  log.info("Test failed: {}", failure.getDescription(), failure.getException());
  failures.add(failure.getDescription().getMethodName());
  context.progress();
}
@Override
public void testFinished(Description description) throws Exception {
  log.info("Finished {}", description);
  context.progress();
}
@Override
public void progress() {
  // Delegate to the Hadoop task context owned by the master
  master.getContext().progress();
}
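An override like this typically satisfies Hadoop's org.apache.hadoop.util.Progressable interface, which lets lower layers report liveness without holding the task context directly. A minimal sketch of that adapter shape; the Master type and its getContext() accessor are placeholders for whatever the enclosing framework provides:

import org.apache.hadoop.util.Progressable;

// Sketch: expose the task context as a Progressable that libraries can ping.
// "Master" and getContext() are placeholders, not a real API.
public class MasterProgressable implements Progressable {
  private final Master master;

  public MasterProgressable(Master master) {
    this.master = master;
  }

  @Override
  public void progress() {
    // Forward the liveness ping to the real Hadoop task context
    master.getContext().progress();
  }
}

Hadoop APIs such as FileSystem.create(Path, Progressable) accept an adapter like this, so long-running writes keep the task from timing out.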
@Override
protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
    throws IOException, InterruptedException {
  try {
    currentBatchCount++;
    final List<Object> values = record.getValues();
    indxWritable.setValues(values);
    indxWritable.write(this.pStatement);
    this.pStatement.execute();
    final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class);
    MutationState currentMutationState = pconn.getMutationState();
    if (mutationState == null) {
      mutationState = currentMutationState;
    }
    // Keep accumulating mutations until the batch size is reached
    mutationState.join(currentMutationState);
    // Write the mutation batch
    if (currentBatchCount % batchSize == 0) {
      writeBatch(mutationState, context);
      mutationState = null;
    }
    // Make sure progress is reported to the Application Master.
    context.progress();
  } catch (SQLException e) {
    LOG.error("Error {} while read/write of a record", e.getMessage());
    context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
    throw new RuntimeException(e);
  }
  context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);
}
@Override
protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
    throws IOException, InterruptedException {
  try {
    final List<Object> values = record.getValues();
    context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);
    currentBatchValues.add(new Pair<>(record.getRowTs(), values));
    if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize != 0) {
      // If we haven't hit the batch size, just report progress and move on to the next record
      context.progress();
      return;
    } else {
      // Otherwise, process the batch
      processBatch(context);
    }
    // Make sure progress is reported to the Application Master.
    context.progress();
  } catch (SQLException | IllegalArgumentException e) {
    LOG.error("Error while read/write of a record", e);
    context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
    throw new IOException(e);
  }
}
do {
  // ... (elided in the original excerpt: poll the child job's state
  //      and break out of the loop once it completes)
  context.progress();
  Thread.sleep(POLL_JOBPROGRESS_MSEC);
} while (true);
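For context, here is the shape of the whole polling loop this fragment belongs to, as a minimal sketch: childJob (an org.apache.hadoop.mapreduce.Job handle) and POLL_JOBPROGRESS_MSEC are assumptions supplied by the enclosing class, while Job.isComplete(), context.progress(), and Thread.sleep() are real calls.

// Sketch: wait on a submitted child job while keeping this controller task alive.
// childJob and POLL_JOBPROGRESS_MSEC are assumed to be defined by the enclosing class.
do {
  if (childJob.isComplete()) { // real org.apache.hadoop.mapreduce.Job API
    break;
  }
  // Heartbeat so the framework does not kill the otherwise-idle controller task
  context.progress();
  Thread.sleep(POLL_JOBPROGRESS_MSEC);
} while (true);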
context.progress(); // heartbeat: tell the framework this task is still alive
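In isolation the call says little; its usual home is a long-running loop that would otherwise emit nothing the framework can see. A minimal sketch, where records, Record, and processRecord(...) are hypothetical stand-ins:

// Sketch: heartbeat during a long, output-free computation.
// records, Record, and processRecord(...) are hypothetical.
long processed = 0;
for (Record record : records) {
  processRecord(record);
  if (++processed % 1000 == 0) {
    // A task silent for mapreduce.task.timeout milliseconds (default 600000)
    // is presumed hung and killed; this call resets that clock.
    context.progress();
  }
}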
@Override
protected void map(LongWritable key, Text value, final Context context)
    throws IOException, InterruptedException {
  Status status = new Status() {
    @Override
    public void setStatus(String msg) {
      context.setStatus(msg);
    }
  };
  ObjectMapper mapper = new ObjectMapper();
  TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  final Connection con = ConnectionFactory.createConnection(conf);
  AsyncConnection asyncCon = null;
  try {
    asyncCon = ConnectionFactory.createAsyncConnection(conf).get();
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
  // Evaluation task
  RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
  context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
  context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
  context.progress();
}
output.progress(); // same heartbeat pattern; here the task context is held in a variable named "output"
@Override
protected void map(NullWritable key, NullWritable value, Context context)
    throws IOException, InterruptedException {
  String suffix = "/" + shortTaskId;
  int BLOCK_SIZE = (int) (recordsToWrite / 100);
  for (long i = 0; i < recordsToWrite;) {
    for (long idx = 0; idx < BLOCK_SIZE && i < recordsToWrite; idx++, i++) {
      int expIdx = rand.nextInt(BLOCK_SIZE) % VISIBILITY_EXPS_COUNT;
      String exp = VISIBILITY_EXPS[expIdx];
      byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp));
      Put p = new Put(row);
      p.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
      p.setCellVisibility(new CellVisibility(exp));
      getCounter(expIdx).increment(1);
      mutator.mutate(p);
      if (i % 100 == 0) {
        context.setStatus("Written " + i + "/" + recordsToWrite + " records");
        context.progress();
      }
    }
    // End of block, flush all of them before we start writing anything
    // pointing to these!
    mutator.flush();
  }
}
protected void persist(Context output, long count, byte[][] prev, byte[][] current, byte[] id)
    throws IOException {
  for (int i = 0; i < current.length; i++) {
    if (i % 100 == 0) {
      // Tickle progress every so often, else the map runner will think we're hung
      output.progress();
    }
    Put put = new Put(current[i]);
    put.addColumn(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]);
    if (count >= 0) {
      put.addColumn(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i));
    }
    if (id != null) {
      put.addColumn(FAMILY_NAME, COLUMN_CLIENT, id);
    }
    // See if we are to write multiple columns.
    if (this.multipleUnevenColumnFamilies) {
      // Use any column name.
      put.addColumn(TINY_FAMILY_NAME, TINY_FAMILY_NAME, this.tinyValue);
      // Use any column name.
      put.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME, this.bigValue);
    }
    mutator.mutate(put);
  }
  mutator.flush();
}
@Override
protected void map(NullWritable key, PeInputSplit value, final Context context)
    throws IOException, InterruptedException {
  Status status = new Status() {
    @Override
    public void setStatus(String msg) {
      context.setStatus(msg);
    }
  };
  // Evaluation task
  pe.tableName = value.getTableName();
  long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), value.getRows(),
      value.getTotalRows(), value.isFlushCommits(), value.isWriteToWAL(), value.isUseTags(),
      value.getNoOfTags(), ConnectionFactory.createConnection(context.getConfiguration()), status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
  context.getCounter(Counter.ROWS).increment(value.rows);
  context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime));
  context.progress();
}
@Override
protected void updateTaskMetrics() {
  super.updateTaskMetrics();
  WorkUnit workUnit = this.task.getTaskState().getWorkunit();
  if (GobblinMetrics.isEnabled(workUnit)) {
    if (workUnit.getPropAsBoolean(ConfigurationKeys.MR_REPORT_METRICS_AS_COUNTERS_KEY,
        ConfigurationKeys.DEFAULT_MR_REPORT_METRICS_AS_COUNTERS)) {
      updateCounters(this.task);
    }
  }
  // Tell the TaskTracker it's making progress
  this.context.progress();
}