public static void killRunningJobs() {
  synchronized (runningJobs) {
    for (RunningJob rj : runningJobs) {
      try {
        System.err.println("killing job with: " + rj.getID());
        rj.killJob();
      } catch (Exception e) {
        LOG.warn("Failed to kill job", e);
        System.err.println("Failed to kill job: " + rj.getID());
        // do nothing
      }
    }
  }
}
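// A minimal sketch, not taken from the snippets above, of how a RunningJob might be added to
// the shared runningJobs list so that killRunningJobs() can clean it up later. The jobClient
// and jobConf parameters and the submitAndTrack name are illustrative assumptions; only
// JobClient.submitJob(JobConf) and the runningJobs list come from the surrounding code.
public static RunningJob submitAndTrack(JobClient jobClient, JobConf jobConf) throws IOException {
  RunningJob submitted = jobClient.submitJob(jobConf);
  synchronized (runningJobs) {
    // track the submitted job so it can be killed on shutdown
    runningJobs.add(submitted);
  }
  return submitted;
}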
private void killJob() {
  boolean needToKillJob = false;
  synchronized (this) {
    if (rj != null && !jobKilled) {
      jobKilled = true;
      needToKillJob = true;
    }
  }
  if (needToKillJob) {
    try {
      rj.killJob();
    } catch (Exception e) {
      LOG.warn("failed to kill job " + rj.getID(), e);
    }
  }
}
@Override
public void shutdown() {
  super.shutdown();
  if (rj != null) {
    try {
      rj.killJob();
    } catch (Exception e) {
      LOG.warn("failed to kill job " + rj.getID(), e);
    }
    rj = null;
  }
}
private void updateMapRedStatsJson(MapRedStats stats, RunningJob rj) throws IOException, JSONException {
  if (statsJSON == null) {
    statsJSON = new JSONObject();
  }
  if (stats != null) {
    if (stats.getNumMap() >= 0) {
      statsJSON.put(NUMBER_OF_MAPPERS, stats.getNumMap());
    }
    if (stats.getNumReduce() >= 0) {
      statsJSON.put(NUMBER_OF_REDUCERS, stats.getNumReduce());
    }
    if (stats.getCounters() != null) {
      statsJSON.put(COUNTERS, getCountersJson(stats.getCounters()));
    }
  }
  if (rj != null) {
    statsJSON.put(JOB_ID, rj.getID().toString());
    statsJSON.put(JOB_FILE, rj.getJobFile());
    statsJSON.put(TRACKING_URL, rj.getTrackingURL());
    statsJSON.put(MAP_PROGRESS, Math.round(rj.mapProgress() * 100));
    statsJSON.put(REDUCE_PROGRESS, Math.round(rj.reduceProgress() * 100));
    statsJSON.put(CLEANUP_PROGRESS, Math.round(rj.cleanupProgress() * 100));
    statsJSON.put(SETUP_PROGRESS, Math.round(rj.setupProgress() * 100));
    statsJSON.put(COMPLETE, rj.isComplete());
    statsJSON.put(SUCCESSFUL, rj.isSuccessful());
  }
}
private MapReduceJobState getMapReduceJobState(MapReduceJob job, JobClient jobClient) throws Exception {
  RunningJob runningJob = getRunningJob(job, jobClient);
  JobID jobID = runningJob.getID();
  TaskReport[] mapTaskReport = jobClient.getMapTaskReports(jobID);
  TaskReport[] reduceTaskReport = jobClient.getReduceTaskReports(jobID);
  return new MapReduceJobState(runningJob, mapTaskReport, reduceTaskReport);
}
/**
 * from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
  } else {
    if (SessionState.get() != null) {
      SessionState.get().getHiveHistory().setTaskProperty(queryId, getId(),
          Keys.TASK_HADOOP_ID, rj.getID().toString());
    }
    console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = " + rj.getTrackingURL());
    console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPREDBIN)
        + " job -kill " + rj.getID());
  }
}
/**
 * from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
  } else {
    if (SessionState.get() != null) {
      SessionState.get().getHiveHistory().setTaskProperty(queryId, getId(),
          Keys.TASK_HADOOP_ID, rj.getID().toString());
    }
    console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = " + rj.getTrackingURL());
    console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)
        + " job -kill " + rj.getID());
  }
}
RunningJob rj = jc.submitJob(job);
LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID()
    + " compaction ID=" + id);
try {
  msc.setHadoopJobid(rj.getID().toString(), id);
} catch (TException e) {
  LOG.warn("Error setting hadoop job, jobId=" + rj.getID().toString()
      + " compactionId=" + id, e);
}
// ...
if (!rj.isSuccessful()) {
  throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor")
      + " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
}
", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]"); RunningJob rj = new JobClient(job).submitJob(job); LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id); txnHandler.setHadoopJobId(rj.getID().toString(), id); rj.waitForCompletion(); if (!rj.isSuccessful()) { throw new IOException(compactionType == CompactionType.MAJOR ? "Major" : "Minor" + " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID() );
TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
if (mappers == null) {
  logMapper = "no information for number of mappers; ";
}
// ...
TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
if (reducers == null) {
  logReducer = "no information for number of reducers. ";
}
// ...
RunningJob newRj = jc.getJob(rj.getID());
if (newRj == null) {
  throw new IOException("Could not find status of job:" + rj.getID());
} else {
  th.setRunningJob(newRj);
}
// ...
for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
  try {
    clientStatPublisher.run(exctractedCounters, rj.getID().toString());
  } catch (RuntimeException runtimeException) {
    LOG.error("Exception " + runtimeException.getClass().getCanonicalName() /* ... */);
  }
}
// ...
MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
mapRedStats.setCounters(ctrs);
// ...
    job, numMap, numReduce, cpuMsec, false, rj.getID().toString());
updateMapRedTaskWebUIStatistics(mapRedStats, rj);
// ...
TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
if (mappers == null) {
  logMapper = "no information for number of mappers; ";
}
// ...
TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
if (reducers == null) {
  logReducer = "no information for number of reducers. ";
}
// ...
RunningJob newRj = jc.getJob(rj.getID());
if (newRj == null) {
  throw new IOException("Could not find status of job:" + rj.getID());
} else {
  th.setRunningJob(newRj);
}
// ...
for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
  try {
    clientStatPublisher.run(exctractedCounters, rj.getID().toString());
  } catch (RuntimeException runtimeException) {
    LOG.error("Exception " + runtimeException.getClass().getCanonicalName() /* ... */);
  }
}
// ...
    TaskReport[] reduceTaskReport) throws IOException {
  jobId = runningJob.getID().toString();
  jobName = runningJob.getJobName();
  trackingURL = runningJob.getTrackingURL();
  // ...
killJob();
jobID = rj.getID().toString();
rj.killJob();
jobID = rj.getID().toString();
public JobID getJobId() {
  if (rJob == null) {
    return null;
  }
  return rJob.getID();
}
/**
 * Get the job identifier.
 *
 * @return the job identifier.
 */
public JobID getID() {
  ensureState(JobState.RUNNING);
  return info.getID();
}
protected void internalNonBlockingStart() throws IOException {
  jobClient = new JobClient( jobConfiguration );
  runningJob = internalNonBlockingSubmit();

  flowStep.logInfo( "submitted hadoop job: " + runningJob.getID() );

  if( runningJob.getTrackingURL() != null )
    flowStep.logInfo( "tracking url: " + runningJob.getTrackingURL() );
}
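// A minimal sketch, assuming the classic org.apache.hadoop.mapred API used throughout these
// snippets, of how a caller might poll a RunningJob returned by a non-blocking submit such as
// internalNonBlockingStart() above. The waitForJob name and the 5-second interval are
// illustrative assumptions, not part of any of the original sources.
private static void waitForJob(RunningJob runningJob) throws IOException, InterruptedException {
  while (!runningJob.isComplete()) {
    System.out.println("job " + runningJob.getID()
        + ": map " + Math.round(runningJob.mapProgress() * 100) + "%"
        + ", reduce " + Math.round(runningJob.reduceProgress() * 100) + "%");
    Thread.sleep(5000);  // arbitrary poll interval
  }
  if (!runningJob.isSuccessful()) {
    throw new IOException("Job failed: " + runningJob.getID());
  }
}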