/**
 * Builds a Mockito-mocked {@code ResourceManager} backed by a fair scheduler
 * double that has no applications, plus a canned client RM service.
 *
 * @param rmContext the (possibly mocked) RM context the double should expose
 * @return a mock RM whose scheduler, context and client service are stubbed
 */
private static ResourceManager mockRmWithApps(RMContext rmContext) throws IOException {
  ResourceScheduler scheduler = mockFairSchedulerWithoutApps(rmContext);
  ClientRMService clientService = mockClientRMService(rmContext);
  ResourceManager mockRm = mock(ResourceManager.class);
  when(mockRm.getRMContext()).thenReturn(rmContext);
  when(mockRm.getResourceScheduler()).thenReturn(scheduler);
  when(mockRm.getClientRMService()).thenReturn(clientService);
  return mockRm;
}
/**
 * Creates and initializes (but does not start) a ResourceManager that is
 * explicitly configured to use the FIFO scheduler.
 */
@Before
public void setUp() throws Exception {
  Configuration configuration = new Configuration();
  configuration.setClass(YarnConfiguration.RM_SCHEDULER,
      FifoScheduler.class, ResourceScheduler.class);
  resourceManager = new ResourceManager();
  resourceManager.init(configuration);
}
private synchronized void startResourceManager(final int index) { try { resourceManagers[index].start(); if (resourceManagers[index].getServiceState() != STATE.STARTED) { // RM could have failed. throw new IOException( "ResourceManager failed to start. Final state is " + resourceManagers[index].getServiceState()); } } catch (Throwable t) { throw new YarnRuntimeException(t); } Configuration conf = resourceManagers[index].getConfig(); LOG.info("MiniYARN ResourceManager address: " + conf.get(YarnConfiguration.RM_ADDRESS)); LOG.info("MiniYARN ResourceManager web address: " + WebAppUtils.getRMWebAppURLWithoutScheme(conf)); }
/**
 * Injected block rendering capacity-scheduler queues; snapshots the current
 * node-labels info from the RM context at construction time.
 */
@Inject
QueuesBlock(ResourceManager rm, CSQInfo info) {
  this.rm = rm;
  this.csqinfo = info;
  // The webapp block only renders CapacityScheduler queues, hence the cast.
  this.cs = (CapacityScheduler) rm.getResourceScheduler();
  this.nodeLabelsInfo =
      rm.getRMContext().getNodeLabelManager().pullRMNodeLabelsInfo();
}
/**
 * Tears down RM-wide metrics and, when requested, rebuilds the RM context and
 * the active service stack.
 *
 * @param initialize when true, also resets the RMContext and re-creates the
 *                   active services
 */
void reinitialize(boolean initialize) {
  // Drop cluster-, queue- and scheduler-level metrics so the next
  // incarnation of the RM starts from a clean slate.
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  getResourceScheduler().resetSchedulerMetrics();
  if (initialize) {
    resetRMContext();
    // NOTE(review): the 'true' flag presumably marks this as a re-init after
    // a transition rather than first boot — confirm against
    // createAndInitActiveServices.
    createAndInitActiveServices(true);
  }
}
/**
 * Snapshot DAO of cluster-level ResourceManager information (identity, HA
 * state, state-store class and version strings) as exposed by the RM webapp.
 *
 * @param rm the live ResourceManager to snapshot
 */
public ClusterInfo(ResourceManager rm) {
  long clusterTimeStamp = ResourceManager.getClusterTimeStamp();
  // The cluster id doubles as the cluster start time.
  this.id = clusterTimeStamp;
  this.startedOn = clusterTimeStamp;
  this.state = rm.getServiceState();
  this.haState = rm.getRMContext().getHAServiceState();
  this.haZooKeeperConnectionState =
      rm.getRMContext().getHAZookeeperConnectionState();
  this.rmStateStoreName =
      rm.getRMContext().getStateStore().getClass().getName();
  this.resourceManagerVersion = YarnVersionInfo.getVersion();
  this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
  this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
  this.hadoopVersion = VersionInfo.getVersion();
  this.hadoopBuildVersion = VersionInfo.getBuildVersion();
  this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
// NOTE(review): fragment of the RM active-services wiring sequence; the
// enclosing method header is outside this view.
rmSecretManagerService = createRMSecretManagerService();
addService(rmSecretManagerService);
// Monitor that expires application masters whose heartbeats stop.
AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor();
addService(amLivelinessMonitor);
rmContext.setAMLivelinessMonitor(amLivelinessMonitor);
// A second liveliness monitor, dedicated to AMs in their finishing phase.
AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor();
addService(amFinishingMonitor);
rmContext.setAMFinishingMonitor(amFinishingMonitor);
RMNodeLabelsManager nlm = createNodeLabelManager();
nlm.setRMContext(rmContext);
addService(nlm);
// NOTE(review): nlm is added as a service but never registered on rmContext
// in this span — confirm a later statement calls setNodeLabelManager(nlm).
delegationTokenRenewer = createDelegationTokenRenewer();
rmContext.setDelegationTokenRenewer(delegationTokenRenewer);
// Scheduler is created and registered before its event dispatcher so the
// dispatcher has a target to deliver SchedulerEvents to.
scheduler = createScheduler();
scheduler.setRMContext(rmContext);
addIfService(scheduler);
rmContext.setScheduler(scheduler);
schedulerDispatcher = createSchedulerEventDispatcher();
addIfService(schedulerDispatcher);
rmDispatcher.register(SchedulerEventType.class, schedulerDispatcher);
resourceTracker = createResourceTrackerService();
addService(resourceTracker);
// NOTE(review): this span appears to be a corrupted merge of an HA failover
// test. Braces do not balance, a wait-loop has been truncated mid-expression,
// and rm2 is used before any visible construction. Reconstruct the original
// method from version control rather than editing this text.
createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer);
ResourceManager rm1 = new MockRM(conf1);
rm1.start();
rm1.getRMContext().getRMAdminService().transitionToActive(req);
assertEquals("RM with ZKStore didn't start", Service.STATE.STARTED, rm1.getServiceState());
assertEquals("RM should be Active", HAServiceProtocol.HAServiceState.ACTIVE, rm1.getRMContext().getRMAdminService().getServiceStatus().getState());
// NOTE(review): rm2 has no visible construction above — its creation was lost.
rm2.start();
rm2.getRMContext().getRMAdminService().transitionToActive(req);
assertEquals("RM with ZKStore didn't start", Service.STATE.STARTED, rm2.getServiceState());
assertEquals("RM should be Active", HAServiceProtocol.HAServiceState.ACTIVE, rm2.getRMContext().getRMAdminService().getServiceStatus().getState());
// NOTE(review): the next lines are garbled remnants of a poll loop (likely
// "while (state != expected) { Thread.sleep(100); }" plus an assertEquals).
rm1.getRMContext().getRMAdminService().getServiceStatus().getState()) { Thread.sleep(100); rm1.getRMContext().getRMAdminService().getServiceStatus().getState());
assertEquals("RM should be Active", HAServiceProtocol.HAServiceState.ACTIVE, rm2.getRMContext().getRMAdminService().getServiceStatus().getState());
rm1.close();
rm2.close();
/**
 * Boots a ResourceManager wired with the move-capable FIFO scheduler and
 * wide-open ACLs, rolling both secret-manager master keys before start.
 */
@Before
public void setUp() throws Exception {
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, " ");
  // NOTE(review): the xface argument repeats the concrete class; the
  // conventional third argument here is ResourceScheduler.class — confirm
  // whether this was intentional.
  conf.setClass(YarnConfiguration.RM_SCHEDULER,
      FifoSchedulerWithMove.class, FifoSchedulerWithMove.class);
  resourceManager = new ResourceManager();
  resourceManager.init(conf);
  resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
  resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  resourceManager.start();
  failMove = false;
}
// NOTE(review): fragment of the RM active-services wiring sequence; the
// enclosing method header is outside this view.
rmSecretManagerService = createRMSecretManagerService();
addService(rmSecretManagerService);
// Monitor that expires application masters whose heartbeats stop.
AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor();
addService(amLivelinessMonitor);
rmContext.setAMLivelinessMonitor(amLivelinessMonitor);
// A second liveliness monitor, dedicated to AMs in their finishing phase.
AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor();
addService(amFinishingMonitor);
rmContext.setAMFinishingMonitor(amFinishingMonitor);
// Enforces per-application lifetime limits.
RMAppLifetimeMonitor rmAppLifetimeMonitor = createRMAppLifetimeMonitor();
addService(rmAppLifetimeMonitor);
rmContext.setRMAppLifetimeMonitor(rmAppLifetimeMonitor);
RMNodeLabelsManager nlm = createNodeLabelManager();
nlm.setRMContext(rmContext);
addService(nlm);
rmContext.setNodeLabelManager(nlm);
NodeAttributesManager nam = createNodeAttributesManager();
addService(nam);
rmContext.setNodeAttributesManager(nam);
// NOTE(review): the create* calls below evidently assign the corresponding
// fields as a side effect (no return value is used) — confirm.
createAllocationTagsManager();
rmContext.setAllocationTagsManager(allocationTagsManager);
createPlacementConstraintManager();
addService(placementConstraintManager);
/**
 * Creates and initializes (but does not start) a ResourceManager with default
 * YARN configuration, pre-rolling both token master keys so that container
 * and NM tokens can be issued in tests.
 */
@Before
public void setUp() throws Exception {
  Configuration configuration = new YarnConfiguration();
  UserGroupInformation.setConfiguration(configuration);
  resourceManager = new ResourceManager();
  resourceManager.init(configuration);
  resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
  resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
}
/**
 * Starts a MockRM with the given preemption utilization threshold and wires
 * the test clock into the (stubbed) fair scheduler.
 *
 * @param utilizationThreshold value for
 *        {@code FairSchedulerConfiguration.PREEMPTION_THRESHOLD}
 */
private void startResourceManager(float utilizationThreshold) {
  conf.setFloat(FairSchedulerConfiguration.PREEMPTION_THRESHOLD,
      utilizationThreshold);
  resourceManager = new MockRM(conf);
  resourceManager.start();
  // The test relies on the stubbed scheduler subclass being picked up.
  assertTrue(
      resourceManager.getResourceScheduler() instanceof StubbedFairScheduler);
  scheduler = (FairScheduler) resourceManager.getResourceScheduler();
  scheduler.setClock(clock);
  // Keep the background update thread effectively quiet during the test.
  scheduler.updateInterval = 60 * 1000;
}
/**
 * Builds a Mockito-mocked {@code ResourceManager} whose scheduler, ACL
 * manager, context and client service are all canned test doubles.
 *
 * @param rmContext the (possibly mocked) RM context the double should expose
 * @return a fully-stubbed mock ResourceManager
 */
public static ResourceManager mockRm(RMContext rmContext) throws IOException {
  ResourceScheduler scheduler = mockCapacityScheduler();
  ApplicationACLsManager aclsManager = mockAppACLsManager();
  ClientRMService clientService = mockClientRMService(rmContext);
  ResourceManager mockRm = mock(ResourceManager.class);
  when(mockRm.getRMContext()).thenReturn(rmContext);
  when(mockRm.getResourceScheduler()).thenReturn(scheduler);
  when(mockRm.getApplicationACLsManager()).thenReturn(aclsManager);
  when(mockRm.getClientRMService()).thenReturn(clientService);
  return mockRm;
}
/**
 * refreshServiceAcls must succeed when the local configuration provider is in
 * use, even with hadoop security authorization enabled.
 */
@Test
public void testServiceAclsRefreshWithLocalConfigurationProvider() {
  configuration.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  ResourceManager resourceManager = null;
  try {
    resourceManager = new ResourceManager();
    resourceManager.init(configuration);
    resourceManager.start();
    resourceManager.adminService.refreshServiceAcls(
        RefreshServiceAclsRequest.newInstance());
  } catch (Exception ex) {
    // Include the caught exception: a bare fail() hid the root cause and made
    // failures of this test undiagnosable.
    fail("Using localConfigurationProvider. Should not get any exception: "
        + ex);
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
/**
 * Verifies that a SchedulingMonitor built on an initialized RM invokes its
 * edit policy's editSchedule() after start.
 */
@Test(timeout = 10000) public void testRMStarts() throws Exception {
  Configuration conf = new YarnConfiguration();
  // Enable scheduler monitors and install the preemption policy by class name.
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
  ResourceManager rm = new ResourceManager();
  rm.init(conf);
  // A mocked policy with a 1s interval lets us observe the monitor's tick.
  SchedulingEditPolicy mPolicy = mock(SchedulingEditPolicy.class);
  when(mPolicy.getMonitoringInterval()).thenReturn(1000L);
  SchedulingMonitor monitor = new SchedulingMonitor(rm.getRMContext(), mPolicy);
  monitor.serviceInit(conf);
  monitor.serviceStart();
  // Wait (up to 10s) for at least one invocation of the policy.
  verify(mPolicy, timeout(10000)).editSchedule();
  monitor.close();
  rm.close();
}
// Closing brace of the enclosing test class.
}
/**
 * Initializes the RM at {@code index} and registers a dispatcher listener
 * that records AM attempt registration timestamps and forgets attempts on
 * unregistration.
 *
 * @param index position of the RM in the {@code resourceManagers} array
 * @param conf  configuration to init the RM with (RM_HA_ID is set when HA
 *              is enabled)
 */
private synchronized void initResourceManager(int index, Configuration conf) {
  if (HAUtil.isHAEnabled(conf)) {
    conf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
  }
  resourceManagers[index].init(conf);
  resourceManagers[index].getRMContext().getDispatcher().register(
      RMAppAttemptEventType.class,
      new EventHandler<RMAppAttemptEvent>() {
        @Override
        public void handle(RMAppAttemptEvent event) {
          if (event instanceof RMAppAttemptRegistrationEvent) {
            // Remember when each AM attempt registered.
            appMasters.put(event.getApplicationAttemptId(),
                event.getTimestamp());
          } else if (event instanceof RMAppAttemptUnregistrationEvent) {
            appMasters.remove(event.getApplicationAttemptId());
          }
        }
      });
}
// Renews the delegation token through the RM's client service. This runs
// inside the surrounding doAs; the anonymous-class header is outside this
// view, so only the method body and closing tokens are shown.
@Override public RenewDelegationTokenResponse run() throws YarnException {
  return rm.getClientRMService().renewDelegationToken(req);
} });
/**
 * Boots the mini YARN cluster, snapshots its effective configuration into a
 * yarn-site.xml under the working directory, and assembles the cluster
 * configuration used to deploy Flink (uber jar, flink-conf.yaml and
 * log4j.properties are localized).
 *
 * @return the YarnClusterConfiguration describing the running mini cluster
 */
private YarnClusterConfiguration prepareYarnCluster() throws IOException, URISyntaxException {
  yarnCluster.init(yarnConf);
  yarnCluster.start();
  // Propagate the RM address chosen by the mini cluster back into our conf.
  yarnConf.set(RM_ADDRESS,
      yarnCluster.getResourceManager().getConfig().get(RM_ADDRESS));

  File siteFile = new File(workDir, "yarn-site.xml");
  try (PrintWriter writer = new PrintWriter(new FileWriter(siteFile))) {
    yarnConf.writeXml(writer);
  }

  Path uberJar = new Path(new File(workDir, "flink.jar").toURI());
  Path confYaml = new Path(new File(workDir, "flink-conf.yaml").toURI());
  @SuppressWarnings("ConstantConditions")
  Path log4j = new Path(Thread.currentThread().getContextClassLoader()
      .getResource("log4j.properties").toURI());
  Set<Path> toLocalize = new HashSet<>(Arrays.asList(uberJar, confYaml, log4j));

  String homeDir = workDir.toURI().toString();
  return new YarnClusterConfiguration(
      yarnConf, homeDir, uberJar, toLocalize, systemJars(siteFile));
}
/**
 * Boots a MockRM wired with the move-capable FIFO scheduler and wide-open
 * ACLs, rolling both secret-manager master keys before start.
 */
@Before
public void setUp() throws Exception {
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, " ");
  // NOTE(review): the xface argument repeats the concrete class; the
  // conventional third argument here is ResourceScheduler.class — confirm
  // whether this was intentional.
  conf.setClass(YarnConfiguration.RM_SCHEDULER,
      FifoSchedulerWithMove.class, FifoSchedulerWithMove.class);
  resourceManager = new MockRM(conf);
  resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
  resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  resourceManager.start();
  failMove = false;
}
/**
 * Verifies that a SchedulingMonitor driven by a mocked edit policy actually
 * invokes editSchedule() once started against a freshly initialized MockRM.
 */
@Test(timeout = 10000)
public void testRMStarts() throws Exception {
  Configuration configuration = new YarnConfiguration();
  configuration.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  configuration.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
  ResourceManager rm = new MockRM();
  rm.init(configuration);

  // A mocked policy with a 1s interval lets us observe the monitor's tick.
  SchedulingEditPolicy editPolicy = mock(SchedulingEditPolicy.class);
  when(editPolicy.getMonitoringInterval()).thenReturn(1000L);
  SchedulingMonitor schedulingMonitor =
      new SchedulingMonitor(rm.getRMContext(), editPolicy);
  schedulingMonitor.serviceInit(configuration);
  schedulingMonitor.serviceStart();
  // Wait (up to 10s) for at least one invocation of the policy.
  verify(editPolicy, timeout(10000)).editSchedule();
  schedulingMonitor.close();
  rm.close();
}