"HDFS_BYTES_READ"); long hdfsRead; if (hdfsReadCntr != null && (hdfsRead = hdfsReadCntr.getValue()) >= 0) { sb.append(" HDFS Read: " + hdfsRead); "HDFS_BYTES_WRITTEN"); long hdfsWritten; if (hdfsWrittenCntr != null && (hdfsWritten = hdfsWrittenCntr.getValue()) >= 0) { sb.append(" HDFS Write: " + hdfsWritten);
"HDFS_BYTES_READ"); long hdfsRead; if (hdfsReadCntr != null && (hdfsRead = hdfsReadCntr.getValue()) >= 0) { sb.append(" HDFS Read: " + hdfsRead); "HDFS_BYTES_WRITTEN"); long hdfsWritten; if (hdfsWrittenCntr != null && (hdfsWritten = hdfsWrittenCntr.getValue()) >= 0) { sb.append(" HDFS Write: " + hdfsWritten); "HDFS_BYTES_READ_EC"); // FileSystemCounter.BYTES_READ_EC if (hdfsReadEcCntr != null) { long hdfsReadEc = hdfsReadEcCntr.getValue(); if (hdfsReadEc >= 0) { sb.append(" HDFS EC Read: " + hdfsReadEc);
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  if (ctrs == null) {
    // Hadoop might return null if it cannot locate the job.
    // We may still be able to retrieve the job status - so ignore.
    return false;
  }
  // Check the number of created files against the configured limit.
  Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP),
      Operator.HIVE_COUNTER_CREATED_FILES);
  long numFiles = cntr != null ? cntr.getValue() : 0;
  long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES);
  if (numFiles > upperLimit) {
    errMsg.append("total number of created files now is " + numFiles
        + ", which exceeds ").append(upperLimit);
    return true;
  }
  return this.callBackObj.checkFatalErrors(ctrs, errMsg);
}
protected long getCounter(org.apache.hadoop.mapreduce.Counters cntrs,
    String counterGroupName, String counterName) throws IOException {
  // Downgrade the new-API counters to the old mapred API before the lookup.
  Counters counters = Counters.downgrade(cntrs);
  return counters.findCounter(counterGroupName, counterName).getValue();
}
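// Usage sketch for the helper above, assuming a completed
// org.apache.hadoop.mapreduce.Job named job; the group and counter names are
// illustrative, not taken from the source.
long hdfsBytesRead = getCounter(job.getCounters(), "FileSystemCounters", "HDFS_BYTES_READ");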
static long getCounterValue(CounterGroupBase<Counter> group, String counterName) {
  Counter counter = group.findCounter(counterName, false);
  if (counter != null) {
    return counter.getValue();
  }
  return 0L;
}
public synchronized void collect(K key, V value) throws IOException {
  outCounter.increment(1);
  writer.append(key, value);
  if ((outCounter.getValue() % progressBar) == 0) {
    progressable.progress();
  }
}
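// Design note on collect() above: the output counter doubles as a record index,
// so progressable.progress() fires only once every `progressBar` records. That
// is enough to keep a long write loop from being killed for inactivity by the
// framework's task timeout, while avoiding a progress call per record.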
/**
 * Returns the current value of the specified counter, or 0 if the counter
 * does not exist.
 * @param key the counter enum to look up
 * @return the counter value, or 0 if the counter is not found
 */
public synchronized long getCounter(Enum<?> key) {
  return findCounter(key).getValue();
}
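// Note: in Hadoop's counter implementations, findCounter(Enum) creates the
// counter (initialized to 0) when it is absent, which is what makes the
// "or 0 if the counter does not exist" contract above hold without a null check.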
/**
 * Returns the value of the specified counter, or 0 if the counter does
 * not exist.
 */
public synchronized long getCounter(String counterName) {
  for (Counter counter : subcounters.values()) {
    if (counter != null && counter.getDisplayName().equals(counterName)) {
      return counter.getValue();
    }
  }
  return 0L;
}
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",
    expectedValue, counter.getValue());
for (int j = 0; j < NUMBER_INC; j++) {
  int incValue = rand.nextInt();
  counter.increment(incValue);  // apply the increment the assertion below expects
  expectedValue += incValue;
  assertEquals("Counter value is not incremented correctly",
      expectedValue, counter.getValue());
}
@SuppressWarnings("deprecation") private void checkLegacyNames(Counters counters) { assertEquals("New name", 1, counters.findCounter( TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue()); assertEquals("Legacy name", 1, counters.findCounter( "org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue()); assertEquals("Legacy enum", 1, counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue()); assertEquals("New name", 1, counters.findCounter( JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue()); assertEquals("Legacy name", 1, counters.findCounter( "org.apache.hadoop.mapred.JobInProgress$Counter", "DATA_LOCAL_MAPS").getValue()); assertEquals("Legacy enum", 1, counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue()); assertEquals("New name", 1, counters.findCounter( FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue()); assertEquals("New name and method", 1, counters.findCounter("file", FileSystemCounter.BYTES_READ).getValue()); assertEquals("Legacy name", 1, counters.findCounter( "FileSystemCounters", "FILE_BYTES_READ").getValue()); }
"CPU_MILLISECONDS"); if (counterCpuMsec != null) { long newCpuMSec = counterCpuMsec.getValue(); if (newCpuMSec > 0) { cpuMsec = newCpuMSec; "CPU_MILLISECONDS"); if (counterCpuMsec != null) { long newCpuMSec = counterCpuMsec.getValue(); if (newCpuMSec > cpuMsec) { cpuMsec = newCpuMSec; FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); if (counter != null) { mapRedStats.setNumModifiedRows(counter.getValue());
/**
 * Fatal errors are errors that cannot be recovered from by retries. They are
 * application dependent. One example of a fatal error: the small table in a
 * map-side join is too large to be handled by a single mapper. The job should
 * fail and the user should be warned to use a regular join instead of a
 * map-side join. Fatal errors are indicated by counters that are set at
 * execution time. If the counter is non-zero, a fatal error occurred. The
 * value of the counter indicates the error type.
 *
 * @return true if fatal errors happened during job execution, false otherwise.
 */
@Override
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  Counters.Counter cntr = ctrs.findCounter(
      HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
      Operator.HIVECOUNTERFATAL);
  return cntr != null && cntr.getValue() > 0;
}
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  if (ctrs == null) {
    // Hadoop might return null if it cannot locate the job.
    // We may still be able to retrieve the job status - so ignore.
    return false;
  }
  // Check the number of created files against the configured limit.
  Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP),
      Operator.HIVECOUNTERCREATEDFILES);
  long numFiles = cntr != null ? cntr.getValue() : 0;
  long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES);
  if (numFiles > upperLimit) {
    errMsg.append("total number of created files now is " + numFiles
        + ", which exceeds ").append(upperLimit);
    return true;
  }
  return this.callBackObj.checkFatalErrors(ctrs, errMsg);
}
/**
 * Fatal errors are errors that cannot be recovered from by retries. They are
 * application dependent. One example of a fatal error: the small table in a
 * map-side join is too large to be handled by a single mapper. The job should
 * fail and the user should be warned to use a regular join instead of a
 * map-side join. Fatal errors are indicated by counters that are set at
 * execution time. If the counter is non-zero, a fatal error occurred. The
 * value of the counter indicates the error type.
 *
 * @return true if fatal errors happened during job execution, false otherwise.
 */
@Override
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
  Counters.Counter cntr = ctrs.findCounter(
      HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
      Operator.HIVE_COUNTER_FATAL);
  return cntr != null && cntr.getValue() > 0;
}
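// Producer side of the contract described above, as a hedged sketch: an
// operator flags a fatal condition at execution time by bumping the fatal-error
// counter through the task's Reporter (here assumed to be held as `reporter`);
// checkFatalErrors then sees a non-zero value.
reporter.incrCounter(HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
    Operator.HIVE_COUNTER_FATAL, 1L);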
public void run(JobConf conf) throws Exception {
  _runningJob = new JobClient(conf).submitJob(conf);
  info("See " + _runningJob.getTrackingURL() + " for details.");
  _runningJob.waitForCompletion();

  if (!_runningJob.isSuccessful()) {
    throw new Exception("Hadoop job:" + getId() + " failed!");
  }

  // Dump all counters.
  Counters counters = _runningJob.getCounters();
  for (String groupName : counters.getGroupNames()) {
    Counters.Group group = counters.getGroup(groupName);
    info("Group: " + group.getDisplayName());
    for (Counter counter : group) {
      info(counter.getDisplayName() + ":\t" + counter.getValue());
    }
  }
}
public CounterInfo(Counters.Counter counter) {
  this.name = counter.getName();
  this.displayName = counter.getDisplayName();
  this.value = counter.getValue();
}
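// Illustrative use of the constructor above (assumed context): flattening a
// finished job's counters into CounterInfo beans, e.g. for JSON serialization
// in a status endpoint. A RunningJob named runningJob and java.util imports
// are assumed.
List<CounterInfo> infos = new ArrayList<>();
for (Counters.Group group : runningJob.getCounters()) {
  for (Counters.Counter c : group) {
    infos.add(new CounterInfo(c));
  }
}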