/**
 * Constructs counter groups from job runtime statistics. Hive mangles Hadoop Counter data,
 * forming counter names with format "$groupName::$counterName".
 *
 * @param counterNameToValue mangled hadoop counters from hive.
 * @return counter groups by name.
 */
public static Map<String, CounterGroup> counterGroupInfoMap(
    Map<String, Double> counterNameToValue) {
  Counters counters = new Counters();
  for (Map.Entry<String, ? extends Number> entry : counterNameToValue.entrySet()) {
    String key = entry.getKey();
    Number value = entry.getValue();
    String[] cNames = key.split("::");
    String groupName = cNames[0];
    String counterName = cNames[1];
    Counter counter = counters.findCounter(groupName, counterName);
    counter.setValue(value.longValue());
  }
  return CounterGroup.counterGroupsByName(counters);
}
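A minimal usage sketch of the method above, assuming the keys follow the "$groupName::$counterName" mangling it describes (the map contents here are hypothetical):

// Hypothetical input: Hive-mangled counter names mapped to their values.
Map<String, Double> raw = new HashMap<>();
raw.put("FileSystemCounters::HDFS_BYTES_READ", 1234.0);
raw.put("org.apache.hadoop.mapred.Task$Counter::CPU_MILLISECONDS", 56.0);

Map<String, CounterGroup> groups = counterGroupInfoMap(raw);
// Each mangled prefix becomes a group, e.g. the "FileSystemCounters" group
// now holds an HDFS_BYTES_READ counter with value 1234.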
/**
 * Fatal errors are those errors that cannot be recovered by retries. These are application
 * dependent. Examples of fatal errors include:
 * - the small table in a map-side join is too large to be feasibly handled by one mapper.
 *   The job should fail and the user should be warned to use regular joins rather than
 *   map-side joins.
 * Fatal errors are indicated by counters that are set at execution time. If the counter is
 * non-zero, a fatal error occurred. The value of the counter indicates the error type.
 *
 * @return true if fatal errors happened during job execution, false otherwise.
 */
@Override
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  Counters.Counter cntr = ctrs.findCounter(
      HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
      Operator.HIVE_COUNTER_FATAL);
  return cntr != null && cntr.getValue() > 0;
}
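The counter consulted above is set on the execution side. A hedged sketch of how a task could flag a fatal error, assuming a mapred Reporter is in scope (the reporter variable and the error-type value are illustrative, not Hive's actual call site):

// Illustrative only: set the fatal-error counter to a non-zero value whose
// magnitude encodes the error type, so checkFatalErrors() reports it.
String group = HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP);
reporter.incrCounter(group, Operator.HIVE_COUNTER_FATAL, 1L);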
Counter hdfsReadCntr = counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ");
long hdfsRead;
Counter hdfsWrittenCntr = counters.findCounter("FileSystemCounters", "HDFS_BYTES_WRITTEN");
long hdfsWritten;
Counter hdfsReadEcCntr =
    counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ_EC"); // FileSystemCounter.BYTES_READ_EC
if (hdfsReadEcCntr != null) {
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  if (ctrs == null) {
    // Hadoop might return null if it cannot locate the job;
    // we may still be able to retrieve the job status, so ignore.
    return false;
  }
  // Check the number of created files against the configured upper limit.
  Counters.Counter cntr = ctrs.findCounter(
      HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP),
      Operator.HIVE_COUNTER_CREATED_FILES);
  long numFiles = cntr != null ? cntr.getValue() : 0;
  long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES);
  if (numFiles > upperLimit) {
    errMsg.append("total number of created files now is ").append(numFiles)
        .append(", which exceeds ").append(upperLimit);
    return true;
  }
  return this.callBackObj.checkFatalErrors(ctrs, errMsg);
}
@JsonIgnore
public void setJobStats(HadoopStepStats stats) {
  Counters counters = new Counters();
  for (String groupName : stats.getCounterGroups()) {
    for (String counterName : stats.getCountersFor(groupName)) {
      Long counterValue = stats.getCounterValue(groupName, counterName);
      counters.findCounter(groupName, counterName).setValue(counterValue);
    }
  }
  setCounterGroupMap(CounterGroup.counterGroupsByName(counters));
}
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  Configuration conf = UTIL.getConfiguration();
  final JobConf job = new JobConf(conf);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);

  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
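The ExampleVerifier mapper referenced above is not shown in this excerpt. A hedged sketch of what it plausibly does, incrementing one counter per row, family, and value via the Reporter (the class body and the HBase cell API calls are assumptions, not the test's verbatim source):

// Sketch only: count each row key, column family, and cell value under
// group names derived from the test class, matching the assertions above.
static class ExampleVerifier implements TableMap<NullWritable, NullWritable> {
  @Override
  public void configure(JobConf conf) { }

  @Override
  public void map(ImmutableBytesWritable key, Result value,
      OutputCollector<NullWritable, NullWritable> output, Reporter reporter)
      throws IOException {
    for (Cell cell : value.listCells()) {
      reporter.incrCounter(TestTableInputFormat.class.getName() + ":row",
          Bytes.toString(CellUtil.cloneRow(cell)), 1);
      reporter.incrCounter(TestTableInputFormat.class.getName() + ":family",
          Bytes.toString(CellUtil.cloneFamily(cell)), 1);
      reporter.incrCounter(TestTableInputFormat.class.getName() + ":value",
          Bytes.toString(CellUtil.cloneValue(cell)), 1);
    }
  }

  @Override
  public void close() { }
}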
Counter counterCpuMsec =
    ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
if (counterCpuMsec != null) {

Counter counter = ctrs.findCounter(
    ss.getConf().getVar(HiveConf.ConfVars.HIVECOUNTERGROUP),
    FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN);
public void run() { counters.findCounter("bad", "test"); } });
public void run() { counters.findCounter("test", "bad"); } });
private void setExpected(Counters counters) {
  counters.findCounter(FRAMEWORK_COUNTER).setValue(FRAMEWORK_COUNTER_VALUE);
  counters.findCounter(FS_SCHEME, FS_COUNTER).setValue(FS_COUNTER_VALUE);
}
private void checkExpected(Counters counters) {
  assertEquals(FRAMEWORK_COUNTER_VALUE, counters.findCounter(FRAMEWORK_COUNTER).getValue());
  assertEquals(FS_COUNTER_VALUE, counters.findCounter(FS_SCHEME, FS_COUNTER).getValue());
}
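The constants used by these helpers sit outside the excerpt. Plausible definitions consistent with the calls above, with illustrative values (all of these are assumptions, not the test's actual values):

// Illustrative definitions matching the helper signatures above: an
// enum-based framework counter plus a scheme-qualified filesystem counter.
private static final TaskCounter FRAMEWORK_COUNTER = TaskCounter.CPU_MILLISECONDS;
private static final long FRAMEWORK_COUNTER_VALUE = 8;
private static final String FS_SCHEME = "hdfs";
private static final FileSystemCounter FS_COUNTER = FileSystemCounter.BYTES_READ;
private static final long FS_COUNTER_VALUE = 10;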
private void testMaxCountersLimits(final Counters counters) {
  for (int i = 0; i < org.apache.hadoop.mapred.Counters.MAX_COUNTER_LIMIT; ++i) {
    counters.findCounter("test", "test" + i);
  }
  setExpected(counters);
  shouldThrow(CountersExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("test", "bad");
    }
  });
  checkExpected(counters);
}
private void testMaxGroupsLimits(final Counters counters) {
  for (int i = 0; i < org.apache.hadoop.mapred.Counters.MAX_GROUP_LIMIT; ++i) {
    // assuming COUNTERS_MAX > GROUPS_MAX
    counters.findCounter("test" + i, "test");
  }
  setExpected(counters);
  shouldThrow(CountersExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("bad", "test");
    }
  });
  checkExpected(counters);
}
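The shouldThrow helper is also outside the excerpt; a minimal sketch consistent with the calls above (the name and signature match the usage, but the body is inferred, not confirmed):

// Sketch only: run the action and assert it fails with the expected type.
private static void shouldThrow(Class<? extends Exception> expected, Runnable action) {
  try {
    action.run();
  } catch (Exception e) {
    assertSame("unexpected exception type", expected, e.getClass());
    return;
  }
  fail("should have thrown " + expected.getSimpleName());
}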
@SuppressWarnings("deprecation") @Test public void testWriteWithLegacyNames() { Counters counters = new Counters(); counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1); counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1); counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1); checkLegacyNames(counters); }
@SuppressWarnings("deprecation") @Test public void testReadWithLegacyNames() { Counters counters = new Counters(); counters.incrCounter(TaskCounter.MAP_INPUT_RECORDS, 1); counters.incrCounter(JobCounter.DATA_LOCAL_MAPS, 1); counters.findCounter("file", FileSystemCounter.BYTES_READ).increment(1); checkLegacyNames(counters); }