Configuration conf =
    FSUtils.getRootDir(getConf()).getFileSystem(getConf()).getConf();
String nameServiceID = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nameServiceID)) {
  throw new Exception("HA for namenode is not enabled");
}
/**
 * @return a collection of all configured NN Kerberos principals.
 */
public static Set<String> getAllNnPrincipals(Configuration conf)
    throws IOException {
  Set<String> principals = new HashSet<String>();
  for (String nsId : DFSUtilClient.getNameServiceIds(conf)) {
    if (HAUtil.isHAEnabled(conf, nsId)) {
      for (String nnId : DFSUtilClient.getNameNodeIds(conf, nsId)) {
        Configuration confForNn = new Configuration(conf);
        NameNode.initializeGenericKeys(confForNn, nsId, nnId);
        String principal = SecurityUtil.getServerPrincipal(
            confForNn.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            DFSUtilClient.getNNAddress(confForNn).getHostName());
        principals.add(principal);
      }
    } else {
      Configuration confForNn = new Configuration(conf);
      NameNode.initializeGenericKeys(confForNn, nsId, null);
      String principal = SecurityUtil.getServerPrincipal(
          confForNn.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
          DFSUtilClient.getNNAddress(confForNn).getHostName());
      principals.add(principal);
    }
  }
  return principals;
}
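The method above iterates every configured nameservice and, for HA nameservices, every NameNode ID, resolving each Kerberos principal against that NameNode's hostname. A minimal usage sketch, assuming (as in Hadoop's source) that the method is a static member of DFSUtil and that Kerberos and the nameservices are already configured:

Configuration conf = new HdfsConfiguration();
// Collect the principal of every configured NameNode, HA or not.
Set<String> principals = DFSUtil.getAllNnPrincipals(conf);
for (String principal : principals) {
  System.out.println("NN principal: " + principal);
}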
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
  List<Configuration> otherNnConfs = HAUtil.getConfForOtherNodes(conf);
  for (Configuration otherNnConf : otherNnConfs) {
    // ...
  }
}
boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
boolean shouldWriteProtobufToken = conf.getBoolean(
    DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
    // ...
public SecondaryNameNode(Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  try {
    String nsId = DFSUtil.getSecondaryNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      throw new IOException(
          "Cannot use SecondaryNameNode in an HA cluster."
          + " The Standby Namenode will perform checkpointing.");
    }
    NameNode.initializeGenericKeys(conf, nsId, null);
    initialize(conf, commandLineOpts);
  } catch (IOException e) {
    shutdown();
    throw e;
  } catch (HadoopIllegalArgumentException e) {
    shutdown();
    throw e;
  }
}
    + nsId + " can use logical URI: " + e);
if (HAUtil.isHAEnabled(conf, nsId) && useLogicalUri) {
  // ...
URI namenodeURI = null;
Configuration newConf = new Configuration(conf);
if (HAUtil.isHAEnabled(conf, nsId)) {
  // ...
nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nsId)) {
  throw new HadoopIllegalArgumentException(
      "HA is not enabled for this namenode.");
}
this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);
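isHAEnabled(conf, nameserviceId) reports true once the nameservice resolves more than one NameNode address. A minimal sketch of a configuration that should pass the check; the key names follow the standard HDFS HA setup, and "ns1", "nn1", "nn2" and the hosts are placeholder values:

Configuration conf = new Configuration();
conf.set("dfs.nameservices", "ns1");
// Two NameNode IDs and their RPC addresses make ns1 an HA nameservice.
conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.ns1.nn1", "host1:8020");
conf.set("dfs.namenode.rpc-address.ns1.nn2", "host2:8020");
boolean haEnabled = HAUtil.isHAEnabled(conf, "ns1"); // expected: true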
"Must know namespace ID before initting edit log"); String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); if (!HAUtil.isHAEnabled(conf, nameserviceId)) { } else if (HAUtil.isHAEnabled(conf, nameserviceId) && (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY
/** Rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_*
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. "
        + "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
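create() is normally driven from the ZKFC entry point. A simplified sketch of that call pattern; the real main() also parses generic options and command-line flags, and run(String[]) is inherited from ZKFailoverController:

public static void main(String[] args) throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Builds the local NN target from the conf, then runs the failover loop.
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(conf);
  System.exit(zkfc.run(args));
}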
+ " this namenode/service.", clientNamenodeAddress); this.haEnabled = HAUtil.isHAEnabled(conf, nsId); state = createHAState(getStartupOption(conf)); this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}