Configuration conf = FSUtils.getRootDir(getConf()).getFileSystem(getConf()).getConf(); String nameServiceID = DFSUtil.getNamenodeNameServiceId(conf); if (!HAUtil.isHAEnabled(conf, nameServiceID)) { throw new Exception("HA for namenode is not enabled");
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 *
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId) throws IOException {
  // Resolve a proxy-plus-address wrapper for every NN in the nameservice,
  // then unwrap each one so callers get bare ClientProtocol handles.
  List<ProxyAndInfo<ClientProtocol>> proxyInfos =
      getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);
  List<ClientProtocol> result =
      new ArrayList<ClientProtocol>(proxyInfos.size());
  for (int i = 0; i < proxyInfos.size(); i++) {
    result.add(proxyInfos.get(i).getProxy());
  }
  return result;
}
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId); if (!HAUtil.isAtLeastOneActive(namenodes)) { throw new IOException("Cannot finalize with no NameNode active"); HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); List<IOException> exceptions = new ArrayList<>();
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf, String nsId) throws IOException { // there is only a single NN configured (and no federation) so we don't have any more NNs if (nsId == null) { return Collections.emptyList(); } List<Configuration> otherNodes = HAUtil.getConfForOtherNodes(conf); List<RemoteNameNodeInfo> nns = new ArrayList<RemoteNameNodeInfo>(); for (Configuration otherNode : otherNodes) { String otherNNId = HAUtil.getNameNodeId(otherNode, nsId); // don't do any validation here as in some cases, it can be overwritten later InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true); final String scheme = DFSUtil.getHttpClientScheme(conf); URL otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(otherIpcAddr.getHostName(), otherNode, scheme).toURL(); nns.add(new RemoteNameNodeInfo(otherNode, otherNNId, otherIpcAddr, otherHttpAddr)); } return nns; }
boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId); boolean shouldWriteProtobufToken = conf.getBoolean( DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE, String nnId = HAUtil.getNameNodeId(conf, nsId); int nnIndex = 0; for (String id : nnIds) {
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) { List<Configuration> otherNnConfs = HAUtil.getConfForOtherNodes(conf); for (Configuration otherNnConf : otherNnConfs) { validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
this.role = role; String nsId = getNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); clientNamenodeAddress = NameNodeUtils.getClientNamenodeAddress( conf, nsId); + " this namenode/service.", clientNamenodeAddress); this.haEnabled = HAUtil.isHAEnabled(conf, nsId); state = createHAState(getStartupOption(conf)); this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf); this.haContext = createHAContext(); try {
/**
 * Parse the local configuration, validate that HA with shared edits is
 * enabled, and locate the other NameNode in this nameservice. Populates the
 * nsId/nnId/otherNNId/otherIpcAddr/otherHttpAddr fields plus the local,
 * edits, and shared-edits directory lists.
 *
 * @throws IOException if the other NN's HTTP address cannot be resolved
 */
private void parseConfAndFindOtherNN() throws IOException {
  Configuration localConf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(localConf);
  if (!HAUtil.isHAEnabled(localConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(localConf, nsId);
  NameNode.initializeGenericKeys(localConf, nsId, nnId);
  if (!HAUtil.usesSharedEditsDir(localConf)) {
    throw new HadoopIllegalArgumentException(
        "Shared edits storage is not enabled for this namenode.");
  }
  // Resolve the peer NN and sanity-check that its IPC address is concrete
  // (non-zero port, not a wildcard bind address).
  Configuration peerConf = HAUtil.getConfForOtherNode(localConf);
  otherNNId = HAUtil.getNameNodeId(peerConf, nsId);
  otherIpcAddr = NameNode.getServiceAddress(peerConf, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);
  final String httpScheme = DFSUtil.getHttpClientScheme(localConf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), peerConf, httpScheme).toURL();
  dirsToFormat = FSNamesystem.getNamespaceDirs(localConf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(localConf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(localConf);
}
Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
useLogicalUri = HAUtil.useLogicalUri(conf, nsUri); } catch (IOException e){ LOG.warn("Getting exception while trying to determine if nameservice " + nsId + " can use logical URI: " + e); if (HAUtil.isHAEnabled(conf, nsId) && useLogicalUri) {
boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId); String thisNnId = HAUtil.getNameNodeId(conf, nsId); String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId); return new BlockTokenSecretManager(updateMin*60*1000L, lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
nsId = DFSUtil.getNamenodeNameServiceId(conf); if (!HAUtil.isHAEnabled(conf, nsId)) { throw new HadoopIllegalArgumentException( "HA is not enabled for this namenode."); nnId = HAUtil.getNameNodeId(conf, nsId); NameNode.initializeGenericKeys(conf, nsId, nnId); if (!HAUtil.usesSharedEditsDir(conf)) { throw new HadoopIllegalArgumentException( "Shared edits storage is not enabled for this namenode.");
boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri); if (isHaAndLogicalUri) { HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId); if (!HAUtil.isAtLeastOneActive(namenodes)) { throw new IOException("Cannot finalize with no NameNode active"); HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) { LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf)); throw new IOException("Invalid configuration: a shared edits dir " +
/** * Clones the delegation token to individual host behind the same logical address. * * @param config the hadoop configuration * @throws IOException if failed to get information for the current user. */ public static void cloneHaNnCredentials(Configuration config) throws IOException { String scheme = URI.create(config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme(); // Loop through all name services. Each name service could have multiple name node associated with it. for (Map.Entry<String, Map<String, InetSocketAddress>> entry : DFSUtil.getHaNnRpcAddresses(config).entrySet()) { String nsId = entry.getKey(); Map<String, InetSocketAddress> addressesInNN = entry.getValue(); if (!HAUtil.isHAEnabled(config, nsId) || addressesInNN == null || addressesInNN.isEmpty()) { continue; } // The client may have a delegation token set for the logical // URI of the cluster. Clone this token to apply to each of the // underlying IPC addresses so that the IPC code can find it. URI uri = URI.create(scheme + "://" + nsId); LOG.info("Cloning delegation token for uri {}", uri); HAUtil.cloneDelegationTokenForLogicalUri(UserGroupInformation.getCurrentUser(), uri, addressesInNN.values()); } }
String bindHostActualKey; if (nsId != null) { String namenodeId = HAUtil.getNameNodeId(conf, nsId); address = DFSUtilClient.getAddressesForNameserviceId( conf, nsId, null, confKey).get(namenodeId);
/**
 * Decode the delegation token carried in the request's delegation parameter
 * and stamp it with the correct service name before returning it.
 *
 * @return the decoded token with its service field set
 * @throws IOException if the URL-encoded token cannot be decoded
 */
Token<DelegationTokenIdentifier> delegationToken() throws IOException {
  final Token<DelegationTokenIdentifier> decoded =
      new Token<DelegationTokenIdentifier>();
  decoded.decodeFromUrlString(param(DelegationParam.NAME));
  URI serviceUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId());
  // A logical (HA) URI needs a synthesized token service name; otherwise
  // the service is built directly from the URI.
  if (HAUtil.isLogicalUri(conf, serviceUri)) {
    decoded.setService(
        HAUtil.buildTokenServiceForLogicalUri(serviceUri, HDFS_URI_SCHEME));
  } else {
    decoded.setService(SecurityUtil.buildTokenService(serviceUri));
  }
  return decoded;
}
private static ClientProtocol getNNProxy( Token<DelegationTokenIdentifier> token, Configuration conf) throws IOException { URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME, token); if (HAUtil.isTokenForLogicalUri(token) && !HAUtil.isLogicalUri(conf, uri)) { // If the token is for a logical nameservice, but the configuration // we have disagrees about that, we can't actually renew it. // This can be the case in MR, for example, if the RM doesn't // have all of the HA clusters configured in its configuration. throw new IOException("Unable to map logical nameservice URI '" + uri + "' to a NameNode. Local configuration does not have " + "a failover proxy provider configured."); } NameNodeProxies.ProxyAndInfo<ClientProtocol> info = NameNodeProxies.createProxy(conf, uri, ClientProtocol.class); assert info.getDelegationTokenService().equals(token.getService()) : "Returned service '" + info.getDelegationTokenService().toString() + "' doesn't match expected service '" + token.getService().toString() + "'"; return info.getProxy(); }
this.nnAddrs = resolveNNAddr(); boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri); boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri); HAUtil.buildTokenServiceForLogicalUri(uri, getScheme()) : SecurityUtil.buildTokenService(getCanonicalUri());
/** * Derive the namenode http address from the current file system, * either default or as set by "-fs" in the generic options. * @return Returns http address or null if failure. * @throws IOException if we can't determine the active NN address */ private URI getCurrentNamenodeAddress(Path target) throws IOException { //String nnAddress = null; Configuration conf = getConf(); //get the filesystem object to verify it is an HDFS system final FileSystem fs = target.getFileSystem(conf); if (!(fs instanceof DistributedFileSystem)) { System.err.println("FileSystem is " + fs.getUri()); return null; } return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, DFSUtil.getHttpClientScheme(conf)); }