/**
 * Returns all jobs tagged with the given tag that have been started after the
 * given timestamp. Returned jobIds are MapReduce JobIds.
 */
@Override
public Set<String> getJobs(String tag, long timestamp) {
  Set<ApplicationId> childYarnJobs = getYarnChildJobs(tag, timestamp);
  Set<String> childJobs = new HashSet<String>();
  for (ApplicationId id : childYarnJobs) {
    // Convert to a MapReduce job id
    String childJobId = TypeConverter.fromYarn(id).toString();
    childJobs.add(childJobId);
  }
  return childJobs;
}
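// A minimal sketch of the ApplicationId-to-MapReduce-JobID mapping the method
// above relies on. The cluster timestamp and sequence number are hypothetical;
// ApplicationId.newInstance and TypeConverter.fromYarn are standard Hadoop APIs.
ApplicationId appId = ApplicationId.newInstance(1460000000000L, 42);
org.apache.hadoop.mapreduce.JobID jobId = TypeConverter.fromYarn(appId);
// jobId.toString() yields "job_1460000000000_0042": the same cluster timestamp
// and sequence number, so the mapping can be reversed with TypeConverter.toYarn.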
public static String getApplicationWebURLOnJHSWithoutScheme(Configuration conf,
    ApplicationId appId) throws UnknownHostException {
  // construct the history url for job
  String addr = getJHSWebappURLWithoutScheme(conf);
  Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
  it.next(); // ignore the bind host
  String port = it.next();
  // Use hs address to figure out the host for webapp
  addr = conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
      XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS);
  String host = ADDR_SPLITTER.split(addr).iterator().next();
  String hsAddress = JOINER.join(host, ":", port);
  InetSocketAddress address = NetUtils.createSocketAddr(
      hsAddress, getDefaultJHSWebappPort(), getDefaultJHSWebappURLWithoutScheme());
  StringBuffer sb = new StringBuffer();
  if (address.getAddress().isAnyLocalAddress() ||
      address.getAddress().isLoopbackAddress()) {
    sb.append(InetAddress.getLocalHost().getCanonicalHostName());
  } else {
    sb.append(address.getHostName());
  }
  sb.append(":").append(address.getPort());
  sb.append("/jobhistory/job/");
  JobID jobId = TypeConverter.fromYarn(appId);
  sb.append(jobId.toString());
  return sb.toString();
}
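// A hedged usage sketch of the URL builder above. The Configuration defaults
// and the application id are illustrative, not taken from a real cluster.
Configuration conf = new Configuration();
ApplicationId appId = ApplicationId.newInstance(1460000000000L, 7);
String url = getApplicationWebURLOnJHSWithoutScheme(conf, appId);
// Expected shape: "<historyHost>:<webappPort>/jobhistory/job/job_1460000000000_0007"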
public static List<TaskReport> fromYarn(
    List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports) {
  List<TaskReport> reports = new ArrayList<TaskReport>();
  for (org.apache.hadoop.mapreduce.v2.api.records.TaskReport r : taskReports) {
    reports.add(fromYarn(r));
  }
  return reports;
}
public static TaskTrackerInfo[] fromYarnNodes(List<NodeReport> nodes) {
  List<TaskTrackerInfo> taskTrackers = new ArrayList<TaskTrackerInfo>();
  for (NodeReport node : nodes) {
    taskTrackers.add(fromYarn(node));
  }
  return taskTrackers.toArray(new TaskTrackerInfo[nodes.size()]);
}
public QueueInfo getQueue(String queueName) throws IOException, InterruptedException {
  try {
    org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
        client.getQueueInfo(queueName);
    return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo, conf);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
/**
 * Get the intermediate configuration file name for a job.
 * @param jobId the jobId.
 * @return the conf file name.
 */
public static String getIntermediateConfFileName(JobId jobId) {
  return TypeConverter.fromYarn(jobId).toString() + CONF_FILE_NAME_SUFFIX;
}
/**
 * Get the intermediate summary file name for a job.
 * @param jobId the jobId.
 * @return the summary file name.
 */
public static String getIntermediateSummaryFileName(JobId jobId) {
  return TypeConverter.fromYarn(jobId).toString() + SUMMARY_FILE_NAME_SUFFIX;
}
@Override
public Map<JobId, Job> getAllJobs(ApplicationId appID) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Called getAllJobs(AppId): " + appID);
  }
  // currently there is a 1-to-1 mapping between app and job id
  org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID);
  Map<JobId, Job> jobs = new HashMap<JobId, Job>();
  JobId jobID = TypeConverter.toYarn(oldJobID);
  jobs.put(jobID, getJob(jobID));
  return jobs;
}
/**
 * Returns the jobId from a job history file name.
 * @param pathString the path string.
 * @return the JobId
 * @throws IOException if the filename format is invalid.
 */
public static JobID getJobIDFromHistoryFilePath(String pathString) throws IOException {
  String[] parts = pathString.split(Path.SEPARATOR);
  String fileNamePart = parts[parts.length - 1];
  JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fileNamePart);
  return TypeConverter.fromYarn(jobIndexInfo.getJobId());
}
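// A hypothetical call to the helper above. The path is illustrative and
// assumes the usual <jobId>-<submitTime>-<user>-<jobName>-... .jhist
// index format of done-history file names.
JobID id = getJobIDFromHistoryFilePath(
    "/history/done/2016/04/07/000000/"
    + "job_1460000000000_0001-1460000001000-alice-WordCount-1460000100000-1-1-SUCCEEDED-default.jhist");
// id.toString() == "job_1460000000000_0001"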
private JobContext getJobContextFromVertexContext(OutputCommitterContext context)
    throws IOException {
  JobID jobId = TypeConverter.fromYarn(context.getApplicationId());
  return new MRJobContextImpl(jobConf, jobId);
}
public JobID getNewJobID() throws IOException, InterruptedException {
  try {
    this.application =
        client.createApplication().getApplicationSubmissionContext();
    this.applicationId = this.application.getApplicationId();
    return TypeConverter.fromYarn(applicationId);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
@Override
public Task createRemoteTask() {
  // job file name is set in TaskAttempt, setting it null here
  ReduceTask reduceTask = new ReduceTask("", TypeConverter.fromYarn(getID()),
      partition, numMapTasks, 1); // YARN doesn't have the concept of slots per task, set it as 1.
  reduceTask.setUser(conf.get(MRJobConfig.USER_NAME));
  reduceTask.setConf(conf);
  return reduceTask;
}
public static org.apache.hadoop.mapred.TaskAttemptID fromYarn(TaskAttemptId id) {
  return new org.apache.hadoop.mapred.TaskAttemptID(fromYarn(id.getTaskId()),
      id.getId());
}
@Override
public Task createRemoteTask() {
  // job file name is set in TaskAttempt, setting it null here
  MapTask mapTask = new MapTask("", TypeConverter.fromYarn(getID()), partition,
      splitInfo.getSplitIndex(), 1); // YARN doesn't have the concept of slots per task, set it as 1.
  mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
  mapTask.setConf(conf);
  return mapTask;
}
@Override
protected void serviceStart() throws Exception {
  scheduler = createSchedulerProxy();
  JobID id = TypeConverter.fromYarn(this.applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  job = context.getJob(jobId);
  register();
  startAllocatorThread();
  super.serviceStart();
}