private void yarnReport() {
  RegisterApplicationMasterResponse response = yarn.getRegistrationResponse();
  LOG.info("YARN queue: " + response.getQueue());
  Resource resource = response.getMaximumResourceCapability();
  LOG.info("YARN max resource: " + resource.getMemory() + " MB, "
      + resource.getVirtualCores() + " cores");
  EnumSet<SchedulerResourceTypes> types = response.getSchedulerResourceTypes();
  StringBuilder buf = new StringBuilder();
  String sep = "";
  for (SchedulerResourceTypes type : types) {
    buf.append(sep);
    buf.append(type.toString());
    sep = ", ";
  }
  LOG.info("YARN scheduler resource types: " + buf.toString());
}
@Override
public void register(String trackingUrl) throws YarnFacadeException {
  String thisHostName = NetUtils.getHostname();
  LOG.debug("Host Name from YARN: " + thisHostName);
  if (trackingUrl != null) {
    // YARN seems to provide multiple names: MACHNAME.local/10.250.56.235
    // The second seems to be the IP address, which is what we want.
    String names[] = thisHostName.split("/");
    amHost = names[names.length - 1];
    appMasterTrackingUrl = trackingUrl.replace("<host>", amHost);
    LOG.info("Tracking URL: " + appMasterTrackingUrl);
  }
  try {
    LOG.trace("Registering with YARN");
    registration = resourceMgr.registerApplicationMaster(thisHostName, 0, appMasterTrackingUrl);
  } catch (YarnException | IOException e) {
    throw new YarnFacadeException("Register AM failed", e);
  }
  // Some distributions (but not stock YARN) support DISK resources. Since Drill
  // compiles against Apache YARN, which has no disk resource, we have to use an
  // indirect mechanism to look for the DISK enum value at runtime, because that
  // enum value is not available at compile time.
  for (SchedulerResourceTypes type : registration.getSchedulerResourceTypes()) {
    if (type.name().equals("DISK")) {
      supportsDisks = true;
    }
  }
}
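// A minimal, hedged sketch (not part of the original class): the same runtime
// check for the vendor-specific DISK scheduler resource type, factored into a
// standalone helper. The helper name supportsDiskResources is an assumption;
// RegisterApplicationMasterResponse and SchedulerResourceTypes are the YARN
// types already used in the method above.
private static boolean supportsDiskResources(RegisterApplicationMasterResponse registration) {
  for (SchedulerResourceTypes type : registration.getSchedulerResourceTypes()) {
    // Match by name so the code still compiles against stock YARN, which lacks a DISK enum value.
    if ("DISK".equals(type.name())) {
      return true;
    }
  }
  return false;
}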
      + ACTIVE_STANDBY_ELECTOR_LOCK;  // start of this path-building statement is truncated in the original
  byte[] data = zk.getData(path, zkw, new Stat());
  ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
  LOG.info("Active RmId : " + proto.getRmId());
  // Assign the lookup result so the hostname logged below is actually populated.
  String activeRMHost = config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
  LOG.info("activeResourceManagerHostname = " + activeRMHost);
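// A hedged follow-on sketch (assumed, not from the original fragment): once the
// active ResourceManager's hostname has been resolved, one plausible use is to
// point a YarnClient at it by overriding yarn.resourcemanager.hostname.
// YarnClient and YarnConfiguration are the standard Apache YARN client APIs;
// the variable names conf and client are illustrative.
YarnConfiguration conf = new YarnConfiguration(config);
conf.set(YarnConfiguration.RM_HOSTNAME, activeRMHost);
YarnClient client = YarnClient.createYarnClient();
client.init(conf);
client.start();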
private void initFields() {
  containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
  allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
  assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
  priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
  startTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
private void initFields() {
  containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
  containerState_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto.C_NEW;
  resource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
  priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
  diagnostics_ = "N/A";
  containerExitStatus_ = 0;
  creationTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
private void initFields() {
  resource_ = org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto.getDefaultInstance();
  status_ = org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto.FETCH_PENDING;
  localPath_ = org.apache.hadoop.yarn.proto.YarnProtos.URLProto.getDefaultInstance();
  localSize_ = 0L;
  exception_ = org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto other) {
  if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto.getDefaultInstance()) return this;
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto other) {
  if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto.getDefaultInstance()) return this;
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/**
 * <code>optional string application_name = 2;</code>
 */
public Builder clearApplicationName() {
  bitField0_ = (bitField0_ & ~0x00000002);
  applicationName_ = getDefaultInstance().getApplicationName();
  onChanged();
  return this;
}
/**
 * <code>optional string application_type = 3;</code>
 */
public Builder clearApplicationType() {
  bitField0_ = (bitField0_ & ~0x00000004);
  applicationType_ = getDefaultInstance().getApplicationType();
  onChanged();
  return this;
}
public org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto buildPartial() {
  org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto result = new org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto(this);
  onBuilt();
  return result;
}
public org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto build() {
  org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto other) {
  if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto.getDefaultInstance()) return this;
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
private void initFields() {
  containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
  allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
  assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
  priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
  startTime_ = 0L;
  finishTime_ = 0L;
  diagnosticsInfo_ = "";
  containerExitStatus_ = 0;
  containerState_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto.C_NEW;
}
private byte memoizedIsInitialized = -1;