/**
 * Looks up the HTTP(S) endpoint of the NameNode backing the default
 * filesystem URI.
 *
 * @return URL of the NameNode's web server
 * @throws IOException if the configured default filesystem is not HDFS
 */
private URL getInfoServer() throws IOException {
  final URI defaultUri = FileSystem.getDefaultUri(conf);
  // Only an HDFS URI identifies a NameNode we can contact.
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(defaultUri.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  final String httpScheme = DFSUtil.getHttpClientScheme(conf);
  final URI infoUri =
      DFSUtil.getInfoServerWithDefaultHost(defaultUri.getHost(), conf, httpScheme);
  LOG.debug("Will connect to NameNode at " + infoUri);
  return infoUri.toURL();
}
/**
 * Resolves the NameNode's web server URL from the configuration, using the
 * service RPC host as the default host.
 */
private URL getHttpAddress(Configuration conf) throws IOException {
  String serviceHost = NameNode.getServiceAddress(conf, true).getHostName();
  String clientScheme = DFSUtil.getHttpClientScheme(conf);
  return DFSUtil.getInfoServerWithDefaultHost(serviceHost, conf, clientScheme)
      .toURL();
}
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf, String nsId) throws IOException { // there is only a single NN configured (and no federation) so we don't have any more NNs if (nsId == null) { return Collections.emptyList(); } List<Configuration> otherNodes = HAUtil.getConfForOtherNodes(conf); List<RemoteNameNodeInfo> nns = new ArrayList<RemoteNameNodeInfo>(); for (Configuration otherNode : otherNodes) { String otherNNId = HAUtil.getNameNodeId(otherNode, nsId); // don't do any validation here as in some cases, it can be overwritten later InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true); final String scheme = DFSUtil.getHttpClientScheme(conf); URL otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(otherIpcAddr.getHostName(), otherNode, scheme).toURL(); nns.add(new RemoteNameNodeInfo(otherNode, otherNNId, otherIpcAddr, otherHttpAddr)); } return nns; }
/**
 * Derives the NameNode's web UI address from the configured service RPC
 * address; the client-side HTTP policy decides http vs. https.
 */
private URL getHttpAddress(Configuration conf) throws IOException {
  final String policyScheme = DFSUtil.getHttpClientScheme(conf);
  final String defaultHostName =
      NameNode.getServiceAddress(conf, true).getHostName();
  final URI infoServer =
      DFSUtil.getInfoServerWithDefaultHost(defaultHostName, conf, policyScheme);
  return infoServer.toURL();
}
/**
 * Determines the web endpoint of the NameNode that serves the default
 * filesystem.
 *
 * @return the NameNode's HTTP(S) server URL
 * @throws IOException if the default filesystem is not an HDFS URI
 */
private URL getInfoServer() throws IOException {
  URI nameNodeUri = FileSystem.getDefaultUri(conf);
  String uriScheme = nameNodeUri.getScheme();
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(uriScheme)) {
    throw new IOException("This is not a DFS");
  }
  URI webAddr = DFSUtil.getInfoServerWithDefaultHost(
      nameNodeUri.getHost(), conf, DFSUtil.getHttpClientScheme(conf));
  LOG.debug("Will connect to NameNode at " + webAddr);
  return webAddr.toURL();
}
/** Returns the URL of the NameNode HTTP server for the given configuration. */
private URL getHttpAddress(Configuration conf) throws IOException {
  InetSocketAddress serviceAddr = NameNode.getServiceAddress(conf, true);
  return DFSUtil.getInfoServerWithDefaultHost(
      serviceAddr.getHostName(), conf, DFSUtil.getHttpClientScheme(conf)).toURL();
}
/**
 * Resolves the Jetty info-server URL of the NameNode identified by the
 * default filesystem URI.
 *
 * @return the NameNode web server URL
 * @throws IOException when the default filesystem scheme is not hdfs
 */
private URL getInfoServer() throws IOException {
  final URI defaultFs = FileSystem.getDefaultUri(conf);
  final boolean isHdfs =
      HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(defaultFs.getScheme());
  if (!isHdfs) {
    throw new IOException("This is not a DFS");
  }
  final String httpScheme = DFSUtil.getHttpClientScheme(conf);
  final URI serverUri =
      DFSUtil.getInfoServerWithDefaultHost(defaultFs.getHost(), conf, httpScheme);
  LOG.debug("Will connect to NameNode at " + serverUri);
  return serverUri.toURL();
}
/**
 * Reads the local HA configuration and locates the other NameNode of the
 * HA pair, populating the instance fields (nsId, nnId, otherNNId,
 * otherIpcAddr, otherHttpAddr, dirsToFormat, editUrisToFormat,
 * sharedEditsUris) used by the rest of this tool.
 *
 * @throws IOException if the other node's addresses cannot be resolved
 * @throws HadoopIllegalArgumentException if HA or shared edits storage is
 *         not enabled for this namenode
 */
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  // NOTE(review): presumably resolves the nsId/nnId-specific generic keys
  // into conf; the later reads of conf (e.g. usesSharedEditsDir) appear to
  // rely on this running first — confirm before reordering.
  NameNode.initializeGenericKeys(conf, nsId, nnId);
  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
        "Shared edits storage is not enabled for this namenode.");
  }
  // Configuration describing the other NameNode of the HA pair.
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  // A wildcard address or port 0 cannot be dialed, so fail fast here.
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)"
      + ", got: %s", otherNNId, otherIpcAddr);
  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();
  // Local storage locations examined by the rest of the tool.
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
/**
 * Parses the HA-related configuration and finds the other NameNode of the
 * pair, filling in the fields (nsId, nnId, otherNNId, otherIpcAddr,
 * otherHttpAddr, dirsToFormat, editUrisToFormat, sharedEditsUris) consumed
 * by later steps.
 *
 * @throws IOException if the other node's addresses cannot be resolved
 * @throws HadoopIllegalArgumentException if HA or shared edits storage is
 *         not enabled for this namenode
 */
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  // NOTE(review): looks like this rewrites the nsId/nnId-specific keys into
  // conf in place; the subsequent conf reads seem to depend on it having
  // run — verify before moving this call.
  NameNode.initializeGenericKeys(conf, nsId, nnId);
  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
        "Shared edits storage is not enabled for this namenode.");
  }
  // Configuration describing the other NameNode of the HA pair.
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  // Reject undialable addresses (wildcard host or port 0) up front.
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)"
      + ", got: %s", otherNNId, otherIpcAddr);
  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();
  // Local storage locations examined by later formatting steps.
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}