Refine search
// -- Excerpt from NameNode#initialize(Configuration): service startup sequence. --
// Order matters: metrics and the HTTP server must exist before the namesystem
// loads, and the RPC server must exist before common services start.
loginAsNameNodeUser(conf);
NameNode.initMetrics(conf, this.getRole());
StartupProgressMetrics.register(startupProgress);
startHttpServer(conf);
loadNamesystem(conf);
startAliasMapServerIfNecessary(conf);
rpcServer = createRpcServer(conf);
initReconfigurableBackoffKey();
// FIX: the formatted host:port string was computed but discarded, leaving
// clientNamenodeAddress unset even though it is logged on the next line.
clientNamenodeAddress = NetUtils.getHostPortString(getNameNodeAddress());
LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
    + " this namenode/service.");
httpServer.setNameNodeAddress(getNameNodeAddress());
httpServer.setFSImage(getFSImage());
startCommonServices(conf);
startMetricsLogger(conf);
// -- Excerpt from the NamenodeFsck constructor: wires the checker to the
// namenode's block manager, topology, and placement policies. --
this.conf = conf;
this.namenode = namenode;
this.blockManager = namenode.getNamesystem().getBlockManager();
this.networktopology = networktopology;
// Output stream fsck results are written to (typically the HTTP response).
this.out = out;
// Placement policies need the datanode host map to judge replica placement.
this.bpPolicies = new BlockPlacementPolicies(conf, null, networktopology,
    namenode.getNamesystem().getBlockManager().getDatanodeManager()
        .getHost2DatanodeMap());
// Interval after which a datanode with no heartbeat is considered stale.
this.staleInterval =
    conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
// NOTE(review): excerpt is truncated mid-expression here — the Tracer.Builder
// call chain continues past the end of this snippet.
this.tracer = new Tracer.Builder("NamenodeFsck").
/**
 * Process entry point: prints usage for help arguments, otherwise creates a
 * NameNode from the command line and blocks until it terminates.
 *
 * @param argv command-line arguments
 * @throws Exception on startup failure not handled by {@code terminate}
 */
public static void main(String argv[]) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
    System.exit(0);
  }
  try {
    StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
    final NameNode nn = createNameNode(argv, null);
    // createNameNode may legitimately return null (e.g. format-and-exit paths).
    if (nn != null) {
      nn.join();
    }
  } catch (Throwable t) {
    LOG.error("Failed to start namenode.", t);
    terminate(1, t);
  }
}
/**
 * Builds and caches this namenode's registration record: RPC address,
 * HTTP address, storage info, and role.
 *
 * @return the freshly built registration, also stored in {@code nodeRegistration}
 */
NamenodeRegistration setRegistration() {
  final String rpcAddr = NetUtils.getHostPortString(getNameNodeAddress());
  final String httpAddr = NetUtils.getHostPortString(getHttpAddress());
  nodeRegistration = new NamenodeRegistration(
      rpcAddr, httpAddr, getFSImage().getStorage(), getRole());
  return nodeRegistration;
}
/**
 * A utility method for creating credentials: fetches a delegation token for
 * {@code renewer} from the namenode, binds it to the namenode's address, and
 * wraps it in a Credentials keyed by the caller's short user name.
 *
 * @param namenode the namenode to request the token from
 * @param ugi caller whose short user name keys the token
 * @param renewer principal allowed to renew the token
 * @return credentials holding the token, or null if no token was issued
 * @throws IOException if the RPC to the namenode fails
 */
public static Credentials createCredentials(final NameNode namenode,
    final UserGroupInformation ugi, final String renewer) throws IOException {
  final Token<DelegationTokenIdentifier> token =
      namenode.getRpcServer().getDelegationToken(new Text(renewer));
  if (token == null) {
    return null;
  }
  // Bind the token to this namenode's RPC endpoint so clients route it correctly.
  SecurityUtil.setTokenService(token, namenode.getNameNodeAddress());
  final Credentials credentials = new Credentials();
  credentials.addToken(new Text(ugi.getShortUserName()), token);
  return credentials;
}
// -- Excerpt from the NameNodeRpcServer constructor: sets up service, lifeline,
// and client RPC endpoints.
// NOTE(review): this excerpt appears garbled — the if/else brace structure does
// not match the upstream source, `serviceRPCAddress` and `lifelineRPCAddress`
// are not declared in the visible text, `bindHost` is declared three times in
// what reads as one scope, and the snippet is truncated mid-statement. Treat
// the comments below as a reading aid, not a statement of correct structure.
public NameNodeRpcServer(Configuration conf, NameNode nn) throws IOException {
  this.nn = nn;
  this.namesystem = nn.getNamesystem();
  this.retryCache = namesystem.getRetryCache();
  this.metrics = NameNode.getNameNodeMetrics();
  // Optional dedicated RPC endpoint for datanode/service traffic.
  InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
  if (serviceRpcAddr != null) {
    String bindHost = nn.getServiceRpcServerBindHost(conf);
    if (bindHost == null) {
      bindHost = serviceRpcAddr.getHostName();
      // NOTE(review): `serviceRPCAddress` is undeclared here — upstream this
      // write-back happens after the server is created; likely mis-excerpted.
      nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
    } else {
      serviceRpcServer = null;
      // Optional lifeline endpoint, kept lightly loaded for HA health checks.
      InetSocketAddress lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf);
      if (lifelineRpcAddr != null) {
        RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
            ProtobufRpcEngine.class);
        String bindHost = nn.getLifelineRpcServerBindHost(conf);
        // NOTE(review): `lifelineRPCAddress` is also undeclared in this excerpt.
        nn.setRpcLifelineServerAddress(conf, lifelineRPCAddress);
      } else {
        lifelineRpcServer = null;
        // Main client-facing RPC endpoint.
        InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
        String bindHost = nn.getRpcServerBindHost(conf);
/**
 * Verifies NameNode.getUri: an explicit port appears in the URI authority,
 * while the default port is omitted entirely.
 */
public void testGetUri() {
  final URI withPort = URI.create("hdfs://foo:555");
  assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)), withPort);
  final URI defaultPort = URI.create("hdfs://foo");
  assertEquals(
      NameNode.getUri(new InetSocketAddress("foo", NameNode.DEFAULT_PORT)),
      defaultPort);
}
}
/**
 * Boots a single-DataNode mini cluster once for the class and records the
 * NameNode and DataNode RPC endpoints for the tests to target.
 */
@BeforeClass
public static void setUp() throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  nnAddress = cluster.getNameNode().getNameNodeAddress();
  final DataNode dataNode = cluster.getDataNodes().get(0);
  dnAddress = new InetSocketAddress(
      dataNode.getDatanodeId().getIpAddr(), dataNode.getIpcPort());
}
static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException { InetSocketAddress add = cluster.getNameNode().getNameNodeAddress(); // Test upper/lower case fileSys.checkPath(new Path("hdfs://" + add.getHostName().toUpperCase() + ":" + add.getPort())); }
public void startServerForClientRequests() throws IOException { if (this.server == null) { InetSocketAddress socAddr = NameNode.getAddress(getConf()); int handlerCount = getConf().getInt("dfs.namenode.handler.count", 10); // create rpc server this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, getConf()); // The rpc-server port can be ephemeral... ensure we have the correct info this.serverAddress = this.server.getListenerAddress(); FileSystem.setDefaultUri(getConf(), getUri(serverAddress)); if (this.httpServer != null) { // This means the server is being started once out of safemode // and jetty is initialized already this.httpServer.setAttribute("name.node.address", getNameNodeAddress()); } LOG.info("Namenode up at: " + this.serverAddress); this.server.start(); } }
/**
 * Verifies that NameNode.getAddress resolves the RPC port from the default FS
 * URI: explicit ports are honored, everything else yields the default port.
 */
public void testGetAddressFromConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  assertPortForUri(conf, "hdfs://foo/", NameNode.DEFAULT_PORT);
  assertPortForUri(conf, "hdfs://foo:555/", 555);
  assertPortForUri(conf, "foo", NameNode.DEFAULT_PORT);
}

/** Sets {@code uri} as the default FS and asserts the resolved RPC port. */
private static void assertPortForUri(Configuration conf, String uri,
    int expectedPort) {
  FileSystem.setDefaultUri(conf, uri);
  assertEquals(NameNode.getAddress(conf).getPort(), expectedPort);
}
// NOTE(review): this excerpt is truncated — the method body continues past
// the final join() in the upstream test (it reloads the saved image), and the
// closing brace is not visible here.
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  // Port 0: let the OS pick free ports so the test never collides.
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set("dfs.http.address", "127.0.0.1:0");
  File base_dir = new File(
      System.getProperty("test.build.data", "build/test/data"), "dfs/");
  conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
  // Disable permissions so mkdirs below needs no user setup.
  conf.setBoolean("dfs.permissions", false);
  NameNode.format(conf);
  NameNode namenode = new NameNode(conf);
  // Create a directory, then save the namespace so an image gets written.
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  assertTrue(namenode.getFileInfo("/test").isDir());
  // saveNamespace requires safe mode.
  namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  namenode.saveNamespace();
  namenode.stop();
  namenode.join();
/**
 * Factory hook: copies the supplied dictionary into a Hadoop Configuration,
 * formats the namespace on first use (no namespace dir exists yet), and
 * creates the NameNode.
 *
 * @param properties configuration entries, stringified key/value pairs
 * @return the created NameNode
 * @throws Exception if formatting or namenode creation fails
 */
@Override
protected NameNode doCreate(Dictionary properties) throws Exception {
  final Configuration conf = new Configuration();
  final Enumeration keys = properties.keys();
  while (keys.hasMoreElements()) {
    final Object key = keys.nextElement();
    conf.set(key.toString(), properties.get(key).toString());
  }
  // Format only when none of the configured namespace dirs exist yet.
  boolean exists = false;
  for (File dir : FSNamesystem.getNamespaceDirs(conf)) {
    if (dir.exists()) {
      exists = true;
    }
  }
  if (!exists) {
    NameNode.format(conf);
  }
  return NameNode.createNameNode(null, conf);
}
/**
 * Gets the rpc port used by the NameNode, because the caller
 * supplied port is not necessarily the actual port used.
 *
 * @return the bound RPC port of the running NameNode
 */
public int getNameNodePort() {
  final InetSocketAddress rpcAddress = nameNode.getNameNodeAddress();
  return rpcAddress.getPort();
}
/** * Tests setting the rpc port to the same as the web port to test that * an exception * is thrown when trying to re-use the same port */ @Test(expected = BindException.class, timeout = 300000) public void testThatMatchingRPCandHttpPortsThrowException() throws IOException { NameNode nameNode = null; try { Configuration conf = new HdfsConfiguration(); File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath()); Random rand = new Random(); final int port = 30000 + rand.nextInt(30000); // set both of these to the same port. It should fail. FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port); DFSTestUtil.formatNameNode(conf); nameNode = new NameNode(conf); } finally { if (nameNode != null) { nameNode.stop(); } } }
/**
 * Starts a NameNode on ephemeral ports with the given image/edits dirs and
 * startup option, asserting it comes up in safe mode.
 *
 * @param conf base configuration, mutated with test addresses and dirs
 * @param imageDirs value for the name (image) directory key
 * @param editsDirs value for the edits directory key
 * @param start startup option passed on the simulated command line
 * @return the started NameNode
 * @throws IOException if startup fails
 */
NameNode startNameNode(Configuration conf, String imageDirs, String editsDirs,
    StartupOption start) throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, imageDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
  final NameNode nn =
      NameNode.createNameNode(new String[] {start.getName()}, conf);
  Assert.assertTrue(nn.isInSafeMode());
  return nn;
}
/** * Initiates and sets a spied on FSNamesystem so tests can hook its methods * @throws IOException if an error occurred */ @Before public void startUp() throws IOException { conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR); // avoid stubbing access control conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); NameNode.initMetrics(conf, NamenodeRole.ACTIVE); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); NameNode.format(conf); fsn = spy(new FSNamesystem(conf)); }
@Before public void setUp() throws Exception { FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); // Set properties to make HDFS aware of NodeGroup. CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyWithNodeGroup.class.getName()); CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, NetworkTopologyWithNodeGroup.class.getName()); CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true); File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class); CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath()); DFSTestUtil.formatNameNode(CONF); namenode = new NameNode(CONF); final BlockManager bm = namenode.getNamesystem().getBlockManager(); replicator = bm.getBlockPlacementPolicy(); cluster = bm.getDatanodeManager().getNetworkTopology(); // construct network topology for(int i=0; i<NUM_OF_DATANODES; i++) { cluster.add(dataNodes[i]); } setupDataNodeCapacity(); }
// -- Excerpt from NameNode startup (plugin loading and address logging). --
// NOTE(review): this excerpt is garbled — the `}` before `catch` has no
// visible matching `try`, and the final if-block is truncated. Comments below
// describe the visible statements only, not the intended brace structure.
registerNNSMXBean();
// Backup/checkpoint roles skip the HTTP server; it is started elsewhere.
if (NamenodeRole.NAMENODE != role) {
  startHttpServer(conf);
  httpServer.setNameNodeAddress(getNameNodeAddress());
  httpServer.setFSImage(getFSImage());
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, ServicePlugin.class);
} catch (RuntimeException e) {
  // Plugin instantiation failures are logged with the offending config value.
  String pluginsValue = conf.get(DFS_NAMENODE_PLUGINS_KEY);
  LOG.error("Unable to load NameNode plugins. Specified list of plugins: "
      + pluginsValue, e);
  LOG.info(getRole() + " RPC up at: " + getNameNodeAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
/**
 * Parses command-line arguments and either performs a one-shot admin action
 * (FORMAT/FINALIZE exit the JVM) or constructs and returns a NameNode.
 *
 * @param argv command-line arguments; may select a startup option
 * @param conf configuration to use; a fresh Configuration if null
 * @return the created NameNode, or null if usage was printed or the service
 *         name failed validation
 * @throws IOException if namenode construction fails
 */
public static NameNode createNameNode(String argv[], Configuration conf)
    throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  StartupOptionAndService startOpt = parseArguments(argv);
  if (startOpt == null) {
    printUsage();
    return null;
  }
  if (!validateServiceName(conf, startOpt.serviceName)) {
    return null;
  }
  initializeGenericKeys(conf, startOpt.serviceName);
  setupDefaultURI(conf);
  setStartupOption(conf, startOpt.startupOption);
  switch (startOpt.startupOption) {
    // FIX: each terminal case now ends with an explicit return so cases can no
    // longer fall through (FORMAT previously fell into FINALIZE textually, and
    // `aborted` was shared across cases); per-case braces scope the locals.
    case FORMAT: {
      boolean aborted = format(conf, true);
      System.exit(aborted ? 1 : 0);
      return null; // unreachable; satisfies flow analysis
    }
    case FINALIZE: {
      boolean aborted = finalize(conf, true);
      System.exit(aborted ? 1 : 0);
      return null; // unreachable; satisfies flow analysis
    }
    default:
      break; // fall through to normal startup
  }
  NameNode namenode = new NameNode(conf, startOpt.failOnTxIdMismatch);
  namenode.nameserviceId = startOpt.serviceName;
  return namenode;
}