/**
 * Used by TestExportSnapshot to test for retries when failures happen.
 * Failure is injected in {@link #copyFile(Context, SnapshotFileInfo, Path)}.
 */
private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
    throws IOException {
  if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return;
  if (testing.injectedFailureCount >= testing.failuresCountToInject) return;
  testing.injectedFailureCount++;
  context.getCounter(Counter.COPY_FAILED).increment(1);
  LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
  throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s",
      testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
}
protected void map(LongWritable key, Text value, Mapper.Context context) throws IOException {
  try {
    XMLStreamReader reader = ...
    context.write(...);
  } catch (XMLStreamException e) {
    // Do not rethrow; skip the malformed record and just count it
    context.getCounter(INVALID_RECORDS).increment(1);
  }
}
@Override
protected void innerMap(InputRow inputRow, Context context)
    throws IOException, InterruptedException {
  final List<Object> groupKey = Rows.toGroupKey(
      rollupGranularity.bucketStart(inputRow.getTimestamp()).getMillis(),
      inputRow
  );
  context.write(
      new BytesWritable(HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey)),
      NullWritable.get()
  );
  context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);
}
}
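// Incrementing by 0 below presumably just initializes the counter, so it is
// reported (as 0) in the job's counter output even if it is never bumped.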
context.getCounter(c).increment(0);
/**
 * Maps the data.
 *
 * @param row The current table row key.
 * @param values The columns.
 * @param context The current context.
 * @throws IOException When something is broken with the data.
 * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context)
 */
@Override
public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException {
  // Count every row containing data, whether it's in qualifiers or values
  context.getCounter(Counters.ROWS).increment(1);
}
}
if (outputStat != null && sameFile(inputStat, outputStat)) {
  LOG.info("Skip copy " + inputStat.getPath() + " to " + outputPath + ", same file.");
  context.getCounter(Counter.FILES_SKIPPED).increment(1);
  context.getCounter(Counter.BYTES_SKIPPED).increment(inputStat.getLen());
  return;
}
context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());
private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) {
  if (sleepMsBeforeReCompare > 0) {
    Threads.sleep(sleepMsBeforeReCompare);
    try {
      Result sourceResult = sourceTable.get(new Get(row.getRow()));
      Result replicatedResult = replicatedTable.get(new Get(row.getRow()));
      Result.compareResults(sourceResult, replicatedResult);
      if (!sourceResult.isEmpty()) {
        context.getCounter(Counters.GOODROWS).increment(1);
        if (verbose) {
          LOG.info("Good row key (with recompare): " + delimiter
              + Bytes.toStringBinary(row.getRow()) + delimiter);
        }
      }
      return;
    } catch (Exception e) {
      LOG.error("recompare fail after sleep, rowkey=" + delimiter
          + Bytes.toStringBinary(row.getRow()) + delimiter);
    }
  }
  context.getCounter(counter).increment(1);
  context.getCounter(Counters.BADROWS).increment(1);
  LOG.error(counter.toString() + ", rowkey=" + delimiter
      + Bytes.toStringBinary(row.getRow()) + delimiter);
}
currentFamily = null;
currentQualifier = null;
context.getCounter(Counters.ROWS).increment(1);
context.write(new Text("Total ROWS"), new IntWritable(1));
currentFamilyName = Bytes.toStringBinary(currentFamily);
currentQualifier = null;
context.getCounter("CF", currentFamilyName).increment(1);
if (1 == context.getCounter("CF", currentFamilyName).getValue()) {
  context.write(new Text("Total Families Across all Rows"), new IntWritable(1));
  context.write(new Text(currentFamily), new IntWritable(1));
}
context.write(new Text(currentRowQualifierName + "_Versions"), new IntWritable(1));
context.getCounter(Counters.CELLS).increment(cellCount);
context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten),

context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten),
    " time=" + StringUtils.formatTimeDiff(etime, stime)
    + String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0));
context.getCounter(Counter.FILES_COPIED).increment(1);
} catch (IOException e) {
  LOG.error("Error copying " + inputPath + " to " + outputPath, e);
  context.getCounter(Counter.COPY_FAILED).increment(1);
  throw e;
);
context.write(
    new SortableBytes(
        bucket.get().toGroupKey(),
  throw pe;
} else {
  context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);
@Override
protected void map(AvroKey<GenericRecord> key, NullWritable value, Context context)
    throws IOException, InterruptedException {
  if (context.getNumReduceTasks() == 0) {
    context.write(key, NullWritable.get());
  } else {
    populateComparableKeyRecord(key.datum(), this.outKey.datum());
    this.outValue.datum(key.datum());
    try {
      context.write(this.outKey, this.outValue);
    } catch (AvroRuntimeException e) {
      final Path[] paths = ((CombineFileSplit) context.getInputSplit()).getPaths();
      throw new IOException("Unable to process paths " + StringUtils.join(paths, ','), e);
    }
  }
  context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(1);
}
    .add(hashFunction.hashBytes(HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey)).asBytes());
context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);
context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Skipped records").increment(1L);
if (skipCounter++ % BatchConstants.NORMAL_RECORD_LOG_THRESHOLD == 0) {
  logger.info("Skipping record with ordinal: " + skipCounter);
}
context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Processed records").increment(1L);
result = ndCuboidBuilder.buildKey(parentCuboid, childCuboid, rowKeySplitter.getSplitBuffers());
outputKey.set(result.getSecond().array(), 0, result.getFirst());
context.write(outputKey, value);
context.getCounter(COUNTER_GROUP, COUNTER_LOADED).increment(inSize);
final String finalSegmentString = HadoopDruidConverterConfig.jsonMapper.writeValueAsString(finalSegment);
context.getConfiguration().set(ConvertingOutputFormat.PUBLISHED_SEGMENT_KEY, finalSegmentString);
context.write(new Text("dataSegment"), new Text(finalSegmentString));
context.getCounter(COUNTER_GROUP, COUNTER_WRITTEN).increment(finalSegment.getSize());
context.progress();
context.setStatus("Ready To Commit");
private void handleParseException(ParseException pe, Context context) {
  context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER).increment(1);
  Counter unparseableCounter =
      context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER);
  Counter processedWithErrorsCounter =
      context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER);
  if (pe.isFromPartiallyValidRow()) {
    processedWithErrorsCounter.increment(1);
  } else {
    unparseableCounter.increment(1);
  }
  if (config.isLogParseExceptions()) {
    log.error(pe, "Encountered parse exception: ");
  }
  long rowsUnparseable = unparseableCounter.getValue();
  long rowsProcessedWithError = processedWithErrorsCounter.getValue();
  if (rowsUnparseable + rowsProcessedWithError > config.getMaxParseExceptions()) {
    log.error("Max parse exceptions exceeded, terminating task...");
    throw new RuntimeException("Max parse exceptions exceeded, terminating task...", pe);
  }
}
private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo)
    throws IOException {
  try {
    Configuration conf = context.getConfiguration();
    FileLink link = null;
    switch (fileInfo.getType()) {
      case HFILE:
        Path inputPath = new Path(fileInfo.getHfile());
        link = getFileLink(inputPath, conf);
        break;
      case WAL:
        link = new WALLink(inputRoot, fileInfo.getWalServer(), fileInfo.getWalName());
        break;
      default:
        throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
    }
    return link.getFileStatus(inputFs);
  } catch (FileNotFoundException e) {
    context.getCounter(Counter.MISSING_FILES).increment(1);
    LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
    throw e;
  } catch (IOException e) {
    LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
    throw e;
  }
}
context.getCounter(Counters.GOODROWS).increment(1);
if (verbose) {
  LOG.info("Good row key: " + delimiter
@Override
protected void map(LongWritable key, Text value, final Context context)
    throws IOException, InterruptedException {
  Status status = new Status() {
    @Override
    public void setStatus(String msg) {
      context.setStatus(msg);
    }
  };
  ObjectMapper mapper = new ObjectMapper();
  TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  final Connection con = ConnectionFactory.createConnection(conf);
  AsyncConnection asyncCon = null;
  try {
    asyncCon = ConnectionFactory.createAsyncConnection(conf).get();
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
  // Evaluation task
  RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
  context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
  context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
  context.progress();
}
}
LOG.debug("Target missing cell: " + sourceCell); context.getCounter(Counter.TARGETMISSINGCELLS).increment(1); matchingRow = false; LOG.debug("Source missing cell: " + targetCell); context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1); matchingRow = false; targetCell.getValueOffset(), targetCell.getValueLength())); context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1); matchingRow = false; context.write(new ImmutableBytesWritable(rowKey), put); put = null; context.write(new ImmutableBytesWritable(rowKey), put); context.getCounter(Counter.MATCHINGCELLS).increment(matchingCells); context.getCounter(Counter.MATCHINGROWS).increment(1); return true; } else { context.getCounter(Counter.ROWSWITHDIFFS).increment(1); return false;
/** * Try to open the "source" file. * Throws an IOException if the communication with the inputFs fail or * if the file is not found. */ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; switch (fileInfo.getType()) { case HFILE: Path inputPath = new Path(fileInfo.getHfile()); link = getFileLink(inputPath, conf); break; case WAL: String serverName = fileInfo.getWalServer(); String logName = fileInfo.getWalName(); link = new WALLink(inputRoot, serverName, logName); break; default: throw new IOException("Invalid File Type: " + fileInfo.getType().toString()); } return link.open(inputFs); } catch (IOException e) { context.getCounter(Counter.MISSING_FILES).increment(1); LOG.error("Unable to open source file=" + fileInfo.toString(), e); throw e; } }