// Convenience constructor: delegates to the primary constructor with a
// freshly loaded default YarnConfiguration (yarn-site.xml etc. from the classpath).
public JstormOnYarn() throws Exception {
  this(new YarnConfiguration());
}
// Convenience constructor: delegates to the primary constructor with a
// freshly loaded default YarnConfiguration.
public YarnRMClient() {
  this(new YarnConfiguration());
}
/**
 * Creates an application master bound to the given Ignite distribution and
 * cluster properties, using a freshly loaded default YARN configuration.
 *
 * @param ignitePath HDFS path to ignite.
 * @param props Cluster properties.
 */
public ApplicationMaster(String ignitePath, ClusterProperties props) throws Exception {
  this.props = props;
  this.ignitePath = new Path(ignitePath);
  this.conf = new YarnConfiguration();
}
public JstormMaster() { // Set up the configuration conf = new YarnConfiguration(); Path jstormyarnConfPath = new Path("jstorm-yarn.xml"); conf.addResource(jstormyarnConfPath); }
/**
 * Creates a configuration with Slider-specific tuning applied, so callers
 * do not have to hand-build custom configs.
 *
 * @return the patched configuration
 */
public static YarnConfiguration createConfiguration() {
  final YarnConfiguration patched = new YarnConfiguration();
  patchConfiguration(patched);
  return patched;
}
/**
 * Creates a launcher for the given Azkaban job. The supplied properties are
 * converted into the Config consumed by {@link GobblinYarnAppLauncher},
 * which is paired with a default YARN configuration.
 *
 * @param jobId Azkaban job id, passed through to the superclass.
 * @param props job properties to convert into the Gobblin config.
 * @throws IOException if the underlying launcher cannot be created.
 */
public AzkabanGobblinYarnAppLauncher(String jobId, Properties props) throws IOException {
  super(jobId, LOGGER);
  final Config config = ConfigUtils.propertiesToConfig(props);
  this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(config, new YarnConfiguration());
}
/** * Lazy loading of YARN configuration since it takes a long time to load. * (YARN provides no caching, sadly.) */ private void loadYarnConfig() { if (yarnConf == null) { yarnConf = new YarnConfiguration(); // On some distributions, lack of proper configuration causes // DFS to default to the local file system. So, a local file // system generally means that the config is wrong, or running // the wrong build of Drill for the user's environment. URI fsUri = FileSystem.getDefaultUri( yarnConf ); if(fsUri.toString().startsWith("file:/")) { System.err.println("Warning: Default DFS URI is for a local file system: " + fsUri); } } }
/**
 * Entry point: builds a GobblinYarnAppLauncher from the default Config and
 * YARN configuration, registers a shutdown hook that stops it, then launches.
 */
public static void main(String[] args) throws Exception {
  final GobblinYarnAppLauncher gobblinYarnAppLauncher =
      new GobblinYarnAppLauncher(ConfigFactory.load(), new YarnConfiguration());
  Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
      try {
        gobblinYarnAppLauncher.stop();
      } catch (IOException ioe) {
        LOGGER.error("Failed to shutdown the " + GobblinYarnAppLauncher.class.getSimpleName(), ioe);
      } catch (TimeoutException te) {
        LOGGER.error("Timeout in stopping the service manager", te);
      } finally {
        // Shutdown email is sent even when stop() failed, if enabled.
        if (gobblinYarnAppLauncher.emailNotificationOnShutdown) {
          gobblinYarnAppLauncher.sendEmailOnShutdown(Optional.<ApplicationReport>absent());
        }
      }
    }
  });
  gobblinYarnAppLauncher.launch();
}
}
@Override public void run() { // Dump configuration if requested for diagnostic use. System.out.println("----------------------------------------------"); System.out.println("Effective Drill-on-YARN Configuration"); DrillOnYarnConfig.instance().dump(); System.out.println("----------------------------------------------"); // Dump YARN configuration. System.out.println("YARN, DFS and Hadoop Configuration"); YarnConfiguration conf = new YarnConfiguration(); try { YarnConfiguration.dumpConfiguration(conf, new OutputStreamWriter(System.out)); System.out.println(); } catch (IOException e) { // Ignore; } System.out.println("----------------------------------------------"); } }
// Load the default YARN configuration (yarn-site.xml etc. from the classpath).
this.yarnConfiguration = new YarnConfiguration();
// Single-threaded executor so timeline events are sent one at a time, in order.
senderExecutor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, senderQueue, threadFactory);
YarnConfiguration yarnConf = new YarnConfiguration();
// NOTE(review): the client is only init()-ed here — confirm start() is
// invoked elsewhere before events are published.
timelineClient = TimelineClient.createTimelineClient();
timelineClient.init(yarnConf);
/**
 * Builds an executor for one YARN container: records the launch metadata,
 * loads the YARN configuration (yarn-site.xml under the given Hadoop home),
 * derives this container's log directory, and starts the YARN registry
 * operations after creating the initial registry paths.
 */
public Executor(String instancName, String shellCommand, STARTType startType,
    String runningContainer, String localDir, String deployPath, String hadoopHome,
    String javaHome, String pythonHome, String dstPath, String portList, String shellArgs,
    String ExecShellStringPath, String applicationId, String supervisorLogviewPort,
    String nimbusThriftPort) {
  executorMeta = new ExecutorMeta(instancName, shellCommand, startType, runningContainer,
      localDir, deployPath, hadoopHome, javaHome, pythonHome, dstPath, portList, shellArgs,
      ExecShellStringPath, applicationId, supervisorLogviewPort, nimbusThriftPort);
  conf = new YarnConfiguration();
  Path yarnSite = new Path(hadoopHome + JOYConstants.YARN_SITE_PATH);
  conf.addResource(yarnSite);
  // get first log dir
  logDir = conf.get(JOYConstants.YARN_NM_LOG, JOYConstants.YARN_NM_LOG_DIR).split(JOYConstants.COMMA)[0]
      + JOYConstants.BACKLASH + applicationId + JOYConstants.BACKLASH + runningContainer;
  // Setup RegistryOperations
  registryOperations = RegistryOperationsFactory.createInstance(JOYConstants.YARN_REGISTRY, conf);
  try {
    setupInitialRegistryPaths();
  } catch (IOException e) {
    // NOTE(review): a failure to create the registry paths is only printed
    // and startup continues — confirm this best-effort behavior is intended.
    e.printStackTrace();
  }
  registryOperations.start();
}
/**
 * Application-master entry point: parses command-line options, configures
 * log4j, derives the container id from the YARN-provided environment, and
 * runs the GobblinApplicationMaster until it stops.
 */
public static void main(String[] args) throws Exception {
  Options options = buildOptions();
  try {
    CommandLine cmd = new DefaultParser().parse(options, args);
    if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)) {
      printUsage(options);
      System.exit(1);
    }
    Log4jConfigurationHelper.updateLog4jConfiguration(GobblinApplicationMaster.class,
        GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
        GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
    LOGGER.info(JvmUtils.getJvmInputArguments());
    // The container id is injected by YARN through the CONTAINER_ID env variable.
    ContainerId containerId = ConverterUtils.toContainerId(
        System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
    // try-with-resources guarantees the application master is closed on exit.
    try (GobblinApplicationMaster applicationMaster = new GobblinApplicationMaster(
        cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME),
        containerId, ConfigFactory.load(), new YarnConfiguration())) {
      applicationMaster.start();
    }
  } catch (ParseException pe) {
    printUsage(options);
    System.exit(1);
  }
}
}
// Wrap the base conf so YARN-specific defaults are layered in, then resolve
// the active ResourceManager in an HA setup to pick its webapp address.
YarnConfiguration yarnConf = new YarnConfiguration(conf);
String active = RMHAUtils.findActiveRMHAId(yarnConf);
// NOTE(review): this statement continues beyond this excerpt (the call is
// truncated here).
rmWebHost = HAUtil.getConfValueForRMInstance(HAUtil.addSuffix(webappConfKey, active), defaultAddr,
// NOTE(review): excerpt begins mid-call; the preceding constructor/super
// invocation closes here.
jobManagerMetricGroup);
this.flinkConfig = flinkConfig;
// Default YARN configuration; Flink-specific settings live in flinkConfig.
this.yarnConfig = new YarnConfiguration();
this.env = env;
// Concurrent map: worker bookkeeping may be touched from multiple threads —
// TODO confirm against callers.
this.workerNodeMap = new ConcurrentHashMap<>();
/**
 * Builds, configures and starts the JobHistoryServer, returning the
 * instance. On any startup failure the error is logged and the JVM is
 * terminated via ExitUtil.
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    // Register the shutdown hook before start so an interrupted startup
    // still gets cleaned up.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer), SHUTDOWN_HOOK_PRIORITY);
    // Layer JobConf defaults under the YARN configuration, then apply any
    // generic command-line options on top.
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
// Single-threaded executor so timeline events are sent one at a time, in order.
senderExecutor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, senderQueue, threadFactory);
YarnConfiguration yarnConf = new YarnConfiguration();
// NOTE(review): the client is only init()-ed here — confirm start() is
// invoked elsewhere before events are published.
timelineClient = TimelineClient.createTimelineClient();
timelineClient.init(yarnConf);
// One-time test fixture: build a YARN client against the default
// configuration and start it before any test in the class runs.
@BeforeClass
public static void setupClass() {
  yarnConfiguration = new YarnConfiguration();
  yarnClient = YarnClient.createYarnClient();
  // init() must precede start() per the YARN service lifecycle.
  yarnClient.init(yarnConfiguration);
  yarnClient.start();
}
@Override public void start(CallbackHandler resourceCallback, org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler nodeCallback ) { conf = new YarnConfiguration(); resourceMgr = AMRMClientAsync.createAMRMClientAsync(pollPeriodMs, resourceCallback); resourceMgr.init(conf); resourceMgr.start(); // Create the asynchronous node manager client nodeMgr = NMClientAsync.createNMClientAsync(nodeCallback); nodeMgr.init(conf); nodeMgr.start(); client = YarnClient.createYarnClient(); client.init(conf); client.start(); String appIdStr = System.getenv(DrillOnYarnConfig.APP_ID_ENV_VAR); if (appIdStr != null) { appId = ConverterUtils.toApplicationId(appIdStr); try { appReport = client.getApplicationReport(appId); } catch (YarnException | IOException e) { LOG.error( "Failed to get YARN applicaiton report for App ID: " + appIdStr, e); } } }
/**
 * Tests that the cluster retrieval of a finished YARN application fails.
 */
@Test(expected = ClusterRetrieveException.class)
public void testClusterClientRetrievalOfFinishedYarnApplication() throws Exception {
  final ApplicationId applicationId = ApplicationId.newInstance(System.currentTimeMillis(), 42);
  // Report the app as FINISHED/SUCCEEDED; such apps cannot be retrieved.
  final ApplicationReport applicationReport = createApplicationReport(
      applicationId, YarnApplicationState.FINISHED, FinalApplicationStatus.SUCCEEDED);
  final YarnClient yarnClient =
      new TestingYarnClient(Collections.singletonMap(applicationId, applicationReport));
  final YarnConfiguration yarnConfiguration = new YarnConfiguration();
  yarnClient.init(yarnConfiguration);
  yarnClient.start();
  final TestingAbstractYarnClusterDescriptor clusterDescriptor =
      new TestingAbstractYarnClusterDescriptor(
          new Configuration(), yarnConfiguration,
          temporaryFolder.newFolder().getAbsolutePath(), yarnClient, false);
  try {
    // Expected to throw ClusterRetrieveException (see @Test annotation).
    clusterDescriptor.retrieve(applicationId);
  } finally {
    clusterDescriptor.close();
  }
}