public int getDataNodePort() {
  HdfsConfiguration.init();
  Configuration dnConf = new HdfsConfiguration(masterServices.getConfiguration());
  int dnPort = NetUtils.createSocketAddr(
      dnConf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
          DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
  LOG.debug("Loaded default datanode port for FN: " + dnPort);
  return dnPort;
}
Configuration result = new HdfsConfiguration();
boolean foundHadoopConfiguration = false;
/**
 * Construct a SnapshotDiff object.
 */
public SnapshotDiff() {
  this(new HdfsConfiguration());
}
@Override // ReconfigurableBase
protected Configuration getNewConf() {
  return new HdfsConfiguration();
}
}
/**
 * Construct a DFSAdmin object.
 */
public DFSAdmin() {
  this(new HdfsConfiguration());
}
@Override // ReconfigurableBase
protected Configuration getNewConf() {
  return new HdfsConfiguration();
}
@Override
public void setConf(Configuration conf) {
  conf = new HdfsConfiguration(conf);
  String nameNodePrincipal = conf.get(
      DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using NN principal: " + nameNodePrincipal);
  }
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      nameNodePrincipal);
  super.setConf(conf);
}
/**
 * Add the requisite security principal settings to the given Configuration,
 * returning a copy.
 * @param conf the original config
 * @return a copy with the security settings added
 */
public static Configuration addSecurityConfiguration(Configuration conf) {
  // Make a copy so we don't mutate it. Also use an HdfsConfiguration to
  // force loading of hdfs-site.xml.
  conf = new HdfsConfiguration(conf);
  String nameNodePrincipal = conf.get(
      DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using NN principal: " + nameNodePrincipal);
  }
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      nameNodePrincipal);
  return conf;
}
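// Hedged usage sketch: the call site below is illustrative, not from the
// source. The point of the copy-constructor pattern above is that the caller's
// Configuration is left untouched, while the returned copy both loads
// hdfs-site.xml (via HdfsConfiguration) and carries the NameNode principal as
// the service user for subsequent secure RPC setup.
Configuration base = new Configuration();
Configuration secured = addSecurityConfiguration(base);
// Assuming the key is not set elsewhere in site config, 'base' still lacks
// the service-user key; only the returned copy carries it.
assert base.get(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY) == null;
assert secured.get(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY) != null;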
public static void main(String[] argv) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
    System.exit(0);
  }

  int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
  System.exit(res);
}
}
public static void main(String[] args) throws Exception {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }

  int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
  System.exit(res);
}
}
private static void setTimeout(HttpURLConnection connection) {
  if (timeout <= 0) {
    Configuration conf = new HdfsConfiguration();
    timeout = conf.getInt(DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_KEY,
        DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT);
    LOG.info("Image Transfer timeout configured to " + timeout
        + " milliseconds");
  }
  Util.setTimeout(connection, timeout);
}
}
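// Hedged note with a config sketch (example value, not from the source): the
// timeout above is read from dfs.image.transfer.timeout the first time
// setTimeout runs and then cached in the static field, so deployments with
// slow links between NameNodes raise it in hdfs-site.xml before startup, e.g.:
//
//   <property>
//     <name>dfs.image.transfer.timeout</name>
//     <value>120000</value> <!-- milliseconds; illustrative value -->
//   </property>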
/**
 * Main entry point for DiskBalancer command handling.
 *
 * @param argv command-line arguments
 * @throws Exception
 */
public static void main(String[] argv) throws Exception {
  DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
  int res = 0;
  try {
    res = ToolRunner.run(shell, argv);
  } catch (Exception ex) {
    String msg = String.format("Exception thrown while running %s.",
        DiskBalancerCLI.class.getSimpleName());
    LOG.error(msg, ex);
    res = 1;
  }
  System.exit(res);
}
public static void main(String[] args) throws Exception {
  // -files option is also used by GenericOptionsParser.
  // Make sure that is not the first argument for fsck.
  int res = -1;
  if ((args.length == 0) || ("-files".equals(args[0]))) {
    printUsage(System.err);
  } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    res = 0;
  } else {
    res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
  }
  System.exit(res);
}
}
/**
 * Run a balancer.
 * @param args command-line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting balancer due to an exception", e);
    System.exit(-1);
  }
}
}
@Override
public void init(DaemonContext context) throws Exception {
  System.err.println("Initializing secure datanode resources");
  // Create a new HdfsConfiguration object to ensure that the configuration in
  // hdfs-site.xml is picked up.
  Configuration conf = new HdfsConfiguration();

  // Stash command-line arguments for regular datanode
  args = context.getArguments();
  resources = getSecureResources(conf);
}
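// Background sketch (hedged; this restates HdfsConfiguration's documented
// behavior rather than its exact source): the reason the snippets here use
// "new HdfsConfiguration()" instead of "new Configuration()" is that
// HdfsConfiguration registers the HDFS resource files as defaults:
Configuration plain = new Configuration();     // sees core-default.xml, core-site.xml
Configuration hdfs = new HdfsConfiguration();  // additionally sees hdfs-default.xml, hdfs-site.xml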
/**
 * Run a Mover from the command line.
 *
 * @param args command-line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, Cli.USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting " + Mover.class.getSimpleName()
        + " due to an exception", e);
    System.exit(-1);
  }
}
}
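// Hedged sketch (the class below is hypothetical, not part of HDFS): Balancer,
// Mover, GetConf, GetGroups, and DFSck above all follow the same
// Tool/ToolRunner contract, so a new HDFS command-line utility only needs a
// run() method; ToolRunner parses generic options and injects the configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ExampleHdfsTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // getConf() returns the HdfsConfiguration passed to ToolRunner.run below,
    // after GenericOptionsParser has applied any -D overrides.
    Configuration conf = getConf();
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // Seed ToolRunner with an HdfsConfiguration so hdfs-site.xml is loaded.
    System.exit(ToolRunner.run(new HdfsConfiguration(), new ExampleHdfsTool(), args));
  }
}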
/** Reread include/exclude files. */
private void refreshHostsReader(Configuration conf) throws IOException {
  // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames.
  // Update the file names and refresh the internal includes and excludes lists.
  if (conf == null) {
    conf = new HdfsConfiguration();
    this.hostConfigManager.setConf(conf);
  }
  this.hostConfigManager.refresh();
}
public static void main(String[] args) throws Exception {
  StringUtils.startupShutdownMessage(DFSZKFailoverController.class, args, LOG);
  if (DFSUtil.parseHelpArgument(args, ZKFailoverController.USAGE,
      System.out, true)) {
    System.exit(0);
  }

  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  try {
    System.exit(zkfc.run(parser.getRemainingArgs()));
  } catch (Throwable t) {
    LOG.error("DFSZKFailoverController exiting due to earlier exception", t);
    terminate(1, t);
  }
}
void refreshNodes() throws IOException {
  String operationName = "refreshNodes";
  checkOperation(OperationCategory.UNCHECKED);
  checkSuperuserPrivilege(operationName);
  getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
  logAuditEvent(true, operationName, null);
}
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
  setHadoopHomeWindows();
  Configuration conf = new HdfsConfiguration();
  conf.set("fs.defaultFS", "hdfs://localhost");
  File hdfsPath = new File(System.getProperty("user.dir") + File.separator
      + "hadoop" + File.separator + "hdfs");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .nameNodePort(12345)
      .nameNodeHttpPort(12341)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .format(true)
      .racks(null)
      .build();
  miniDFSCluster.waitActive();
  return miniDFSCluster;
}
}
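// Hedged usage sketch (the test body below is illustrative, not from the
// source): callers should shut the cluster down in a finally block so the data
// directories created under user.dir are released even when the test throws.
MiniDFSCluster cluster = getLocalHDFSCluster();
try {
  FileSystem fs = cluster.getFileSystem();
  fs.mkdirs(new Path("/smoke"));
  assert fs.exists(new Path("/smoke"));
} finally {
  cluster.shutdown();
}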