/**
 * Creates a DFSClient wired to the default namenode, whose address is
 * resolved from the supplied configuration.
 *
 * @param conf client configuration to resolve the namenode from
 * @throws IOException if the client cannot be created
 */
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
/**
 * Looks up the configured address of the RPC server that handles
 * requests from datanodes.
 *
 * @param conf configuration to consult
 * @return the configured address, or {@code null} when the
 *         datanode-protocol address property is not set
 */
public static InetSocketAddress getDNProtocolAddress(Configuration conf) {
  return getAddress(conf, DATANODE_PROTOCOL_ADDRESS);
}
/**
 * Convenience constructor; equivalent to
 * {@code this(NameNode.getAddress(conf), conf)}.
 *
 * @see #DFSClient(InetSocketAddress, Configuration)
 * @throws IOException if the client cannot be created
 */
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
/**
 * {@inheritDoc}
 *
 * Resolves the namenode RPC address from the configuration.
 */
@Override
protected InetSocketAddress getProtocolAddress(Configuration conf)
    throws IOException {
  return NameNode.getAddress(conf);
}
/**
 * {@inheritDoc}
 *
 * The protocol endpoint is the namenode address taken from the
 * configuration.
 */
@Override
protected InetSocketAddress getProtocolAddress(Configuration conf)
    throws IOException {
  return NameNode.getAddress(conf);
}
/**
 * Convenience constructor; equivalent to
 * {@code this(NameNode.getAddress(conf), conf)}.
 *
 * @see #DFSClient(InetSocketAddress, Configuration)
 * @deprecated Deprecated at 0.21
 */
@Deprecated
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
/**
 * Parses the socket address stored under the given configuration
 * property.
 *
 * @param conf     configuration to read from
 * @param property name of the address property
 * @return the parsed address, or {@code null} when the property is
 *         absent or empty
 */
private static InetSocketAddress getAddress(Configuration conf,
    String property) {
  final String addressText = conf.get(property);
  if (addressText != null && !addressText.isEmpty()) {
    return getAddress(addressText);
  }
  return null;
}
/**
 * Performs a Kerberos login using the namenode's keytab and principal,
 * bound to the namenode's hostname.
 *
 * @throws IOException if the login fails
 */
@Override
public void loginAsFCUser() throws IOException {
  final InetSocketAddress nnAddress = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnAddress.getHostName());
}
/**
 * The locking hierarchy is to first acquire lock on DFSClient object,
 * followed by lock on leasechecker, followed by lock on an individual
 * DFSOutputStream.
 */
public static ClientProtocol createNamenode(Configuration conf)
    throws IOException {
  // Connect to the default namenode resolved from the configuration.
  return createNamenode(NameNode.getAddress(conf), conf);
}
/**
 * Builds an RPC proxy to the default namenode on behalf of the current
 * Unix user.
 *
 * @throws IOException if login fails (the {@link LoginException} is
 *         preserved as the cause) or the proxy cannot be created
 */
public static ProtocolProxy<ClientProtocol> createRPCNamenode(
    Configuration conf) throws IOException {
  // Resolve the address first, matching the original evaluation order.
  final InetSocketAddress nnAddress = NameNode.getAddress(conf);
  final UnixUserGroupInformation ugi;
  try {
    ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    // Surface login problems as IOException, keeping the cause chained.
    throw new IOException(e);
  }
  return createRPCNamenode(nnAddress, conf, ugi);
}
/**
 * Verifies that {@code NameNode.getAddress(Configuration)} picks up the
 * port from the default filesystem URI, falling back to the default
 * namenode port when none is given.
 */
public void testGetAddressFromConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // No port in the URI: expect the default namenode port.
  // (Arguments are ordered (expected, actual) per the JUnit convention,
  // so failure messages read correctly.)
  FileSystem.setDefaultUri(conf, "hdfs://foo/");
  assertEquals(NameNode.DEFAULT_PORT, NameNode.getAddress(conf).getPort());
  // An explicit port in the URI is honored.
  FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
  assertEquals(555, NameNode.getAddress(conf).getPort());
  // Bare authority with no port: default port again.
  FileSystem.setDefaultUri(conf, "foo");
  assertEquals(NameNode.DEFAULT_PORT, NameNode.getAddress(conf).getPort());
}
/**
 * Opens a {@code ClientProtocol} RPC proxy to the default namenode on
 * behalf of the given user.
 *
 * @throws IOException if the proxy cannot be created
 */
static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
    Configuration conf, UserGroupInformation ugi) throws IOException {
  final InetSocketAddress nnAddress = NameNode.getAddress(conf);
  final javax.net.SocketFactory factory = NetUtils.getSocketFactory(conf,
      org.apache.hadoop.hdfs.protocol.ClientProtocol.class);
  return (org.apache.hadoop.hdfs.protocol.ClientProtocol) RPC.getProxy(
      org.apache.hadoop.hdfs.protocol.ClientProtocol.class,
      org.apache.hadoop.hdfs.protocol.ClientProtocol.versionID,
      nnAddress, ugi, conf, factory);
}
/**
 * Initializes this filesystem to talk to the namenode identified by the
 * URI's authority.
 *
 * @throws IOException if the URI does not carry a host
 */
public void initialize(URI uri, Configuration conf) throws IOException {
  setConf(conf);
  if (uri.getHost() == null) {
    throw new IOException("Incomplete HDFS URI, no host: "+ uri);
  }
  final InetSocketAddress namenodeAddr = NameNode.getAddress(uri.getAuthority());
  this.dfs = new DFSClient(namenodeAddr, conf, statistics);
  this.uri = NameNode.getUri(namenodeAddr);
  this.workingDir = getHomeDirectory();
}
/**
 * Returns an MXBean helper for the namenode at the given address,
 * reusing the in-process namenode when its RPC address matches.
 */
private NamenodeMXBeanHelper getNNHelper(InetSocketAddress isa)
    throws IOException, MalformedObjectNameException {
  if (localnn != null) {
    // Prefer the local namenode when the caller asked for its address.
    final InetSocketAddress localAddr = NameNode.getAddress(localnn.getConf());
    if (localAddr.equals(isa)) {
      return new NamenodeMXBeanHelper(isa, conf, localnn);
    }
  }
  return new NamenodeMXBeanHelper(isa, conf);
}

/**
/**
 * Stubs the servlet mocks so a doGet call resolves the given test file
 * against the cluster's namenode.
 */
private void setUpForDoGetTest(MiniDFSCluster cluster, Path testFile) {
  Mockito.doReturn(CONF)
      .when(mockServletContext).getAttribute(JspHelper.CURRENT_CONF);
  // The request names the namenode via the "nnaddr" parameter.
  Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
      .when(mockHttpServletRequest).getParameter("nnaddr");
  Mockito.doReturn(testFile.toString())
      .when(mockHttpServletRequest).getPathInfo();
  Mockito.doReturn("/streamFile"+testFile.toString())
      .when(mockHttpServletRequest).getRequestURI();
}
/**
 * Verifies that {@code NameNode.getAddress(String)} parses the port
 * from host strings and URIs, falling back to the default port.
 */
@Test
public void testGetAddressFromString() throws Exception {
  // Arguments are ordered (expected, actual) per the JUnit convention,
  // so failure messages read correctly.
  // Bare host: default port.
  assertEquals(NameNode.DEFAULT_PORT, NameNode.getAddress("foo").getPort());
  // Scheme-qualified URI without a port: default port.
  assertEquals(NameNode.DEFAULT_PORT,
      NameNode.getAddress("hdfs://foo/").getPort());
  // Explicit ports are honored, with or without a scheme.
  assertEquals(555, NameNode.getAddress("hdfs://foo:555").getPort());
  assertEquals(555, NameNode.getAddress("foo:555").getPort());
}
// Builds a non-HA proxy for the namenode named by the URI, on behalf of
// the current user.
public DummyLegacyFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  try {
    this.proxy = NameNodeProxies.createNonHAProxy(conf,
        NameNode.getAddress(uri), xface,
        UserGroupInformation.getCurrentUser(), false).getProxy();
    this.xface = xface;
  } catch (IOException ioe) {
    // NOTE(review): the IOException is silently swallowed, leaving
    // this.proxy (and this.xface) unset on failure — presumably
    // acceptable for this dummy/test provider, but confirm callers
    // tolerate a null proxy.
  }
}
/**
 * Verifies that {@code NameNode.getAddress(Configuration)} picks up the
 * port from the default filesystem URI, falling back to the default
 * namenode port when none is given.
 */
@Test
public void testGetAddressFromConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // No port in the URI: expect the default namenode port.
  // (Arguments are ordered (expected, actual) per the JUnit convention,
  // so failure messages read correctly.)
  FileSystem.setDefaultUri(conf, "hdfs://foo/");
  assertEquals(NameNode.DEFAULT_PORT, NameNode.getAddress(conf).getPort());
  // An explicit port in the URI is honored.
  FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
  assertEquals(555, NameNode.getAddress(conf).getPort());
  // Bare authority with no port: default port again.
  FileSystem.setDefaultUri(conf, "foo");
  assertEquals(NameNode.DEFAULT_PORT, NameNode.getAddress(conf).getPort());
}
/**
 * Spins up a mini DFS cluster and seeds it with a single test file
 * before any test runs.
 */
@BeforeClass
public static void testSetUp() throws Exception {
  conf = new HdfsConfiguration();
  // Limit listing batches to 2 entries so pagination paths get exercised.
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = cluster.getFileSystem();
  fc = FileContext.getFileContext(cluster.getURI(), conf);
  hftpfs = cluster.getHftpFileSystem();
  dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  file1 = new Path("filestatus.dat");
  writeFile(fs, file1, 1, fileSize, blockSize);
}
/**
 * Spins up a mini DFS cluster (addressing namenode 0 explicitly) and
 * seeds it with a single test file before any test runs.
 */
@BeforeClass
public static void testSetUp() throws Exception {
  conf = new HdfsConfiguration();
  // Limit listing batches to 2 entries so pagination paths get exercised.
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = cluster.getFileSystem();
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  hftpfs = cluster.getHftpFileSystem(0);
  dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  file1 = new Path("filestatus.dat");
  writeFile(fs, file1, 1, fileSize, blockSize);
}