/**
 * Get the name service Id for the node
 * @return name service Id or null if federation is not configured
 */
protected String getNameServiceId(Configuration conf) {
  // Delegation only: DFSUtil resolves the nameservice by matching the
  // configured NameNode addresses against this node's local address.
  final String nameServiceId = DFSUtil.getNamenodeNameServiceId(conf);
  return nameServiceId;
}
// Restart-active-namenode action: requires an HA-enabled cluster.
LOG.info("Performing action: Restart active namenode");
// Obtain the live cluster configuration via the root dir's FileSystem.
Configuration conf = FSUtils.getRootDir(getConf()).getFileSystem(getConf()).getConf();
String nameServiceID = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nameServiceID)) {
  throw new Exception("HA for namenode is not enabled");
/**
 * Builds the remote-NameNode descriptors for this node's own nameservice.
 */
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf)
    throws IOException {
  // Resolve the local nameservice and delegate to the two-arg overload.
  return getRemoteNameNodes(conf, DFSUtil.getNamenodeNameServiceId(conf));
}
@Override
public List<HAServiceTarget> getAllOtherNodes() {
  // Find the ids of the other NameNodes in this nameservice and wrap each
  // one in an HA service target so callers can address it for failover.
  final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  final List<String> otherNnIds =
      HAUtil.getNameNodeIdOfOtherNodes(conf, nameserviceId);
  final List<HAServiceTarget> targets = new ArrayList<>(otherNnIds.size());
  for (String otherNnId : otherNnIds) {
    targets.add(new NNHAServiceTarget(conf, nameserviceId, otherNnId));
  }
  return targets;
}
}
/** * Given the configuration for this node, return a list of configurations * for the other nodes in an HA setup. * * @param myConf the configuration of this node * @return a list of configuration of other nodes in an HA setup */ public static List<Configuration> getConfForOtherNodes( Configuration myConf) { String nsId = DFSUtil.getNamenodeNameServiceId(myConf); List<String> otherNodes = getNameNodeIdOfOtherNodes(myConf, nsId); // Look up the address of the other NNs List<Configuration> confs = new ArrayList<Configuration>(otherNodes.size()); myConf = new Configuration(myConf); // unset independent properties for (String idpKey : HA_SPECIAL_INDEPENDENT_KEYS) { myConf.unset(idpKey); } for (String nn : otherNodes) { Configuration confForOtherNode = new Configuration(myConf); NameNode.initializeGenericKeys(confForOtherNode, nsId, nn); confs.add(confForOtherNode); } return confs; }
String confKey, String defaultValue, String bindHostKey) {
  InetSocketAddress address;
  // A non-null nameservice id means federation is configured, so the
  // nameservice-suffixed key variants must be consulted below.
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String bindHostActualKey;
  if (nsId != null) {
/**
 * returns the list of all namenode ids for the given configuration
 */
@Override
protected Collection<String> getTargetIds(String namenodeToActivate) {
  // Prefer an explicitly selected nameservice; otherwise resolve the
  // local node's nameservice from the configuration.
  String nsId = nameserviceId;
  if (nsId == null) {
    nsId = DFSUtil.getNamenodeNameServiceId(getConf());
  }
  return DFSUtilClient.getNameNodeIds(getConf(), nsId);
}
    DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
// Determine whether HA is enabled for the local nameservice.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
boolean shouldWriteProtobufToken = conf.getBoolean(
// When HA is enabled, iterate over the configurations of the other
// NameNodes in this nameservice as well.
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
  List<Configuration> otherNnConfs = HAUtil.getConfForOtherNodes(conf);
  for (Configuration otherNnConf : otherNnConfs) {
/**
 * Rolls the file system state back to the pre-upgrade checkpoint.
 *
 * @param conf configuration for this NameNode
 * @param isConfirmationNeeded whether to prompt the operator first
 * @return true if the rollback was aborted at the prompt, false on success
 * @throws IOException if the image rollback fails
 */
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  // Expand the nameservice/namenode-suffixed keys into their generic
  // form before touching any on-disk state.
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  final FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  // Abort (and report it via the return value) if the operator declines.
  if (isConfirmationNeeded && !confirmPrompt("Roll back file system state?")) {
    System.err.println("Rollback aborted.");
    return true;
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  // Resolve the nameservice this node belongs to (null when federation
  // is not configured).
  nsId = DFSUtil.getNamenodeNameServiceId(conf);
// Determine the local nameservice and whether it is configured for HA.
nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
    throws IOException {
  // Expand the generic keys for this node before loading the image.
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  NameNode.initializeGenericKeys(conf, nsId, HAUtil.getNameNodeId(conf, nsId));
  final FSImage image = new FSImage(conf);
  final FSNamesystem namesystem = new FSNamesystem(conf, image, false);
  return image.recoverTransitionRead(StartupOption.METADATAVERSION,
      namesystem, null);
}
private static void doRecovery(StartupOption startOpt, Configuration conf)
    throws IOException {
  // Expand the nameservice/namenode-suffixed keys into their generic
  // form before running recovery.
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
/** rollback for rolling upgrade. */ private void rollingRollback(long discardSegmentTxId, long ckptId) throws IOException { // discard discard unnecessary editlog segments starting from the given id this.editLog.discardSegments(discardSegmentTxId); // rename the special checkpoint renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE, true); // purge all the checkpoints after the marker archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId); // HDFS-7939: purge all old fsimage_rollback_* archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK); String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); if (HAUtil.isHAEnabled(conf, nameserviceId)) { // close the editlog since it is currently open for write this.editLog.close(); // reopen the editlog for read this.editLog.initSharedJournalsForRead(); } }
// Expand the nameservice/namenode-suffixed configuration keys into their
// generic form for this node.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
public void initEditLog(StartupOption startOpt) throws IOException {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  // Editlog initialization differs between HA and non-HA deployments, so
  // check whether the local nameservice is HA-enabled first.
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
/**
 * Builds a ZK failover controller for the local NameNode.
 *
 * @param conf base configuration for this node
 * @return a controller targeting the local NameNode
 * @throws HadoopIllegalArgumentException if HA is disabled or the local
 *     namenode id cannot be determined
 */
public static DFSZKFailoverController create(Configuration conf) {
  // Start from a copy that carries the security configuration.
  final Configuration zkfcConf = DFSHAAdmin.addSecurityConfiguration(conf);
  final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(zkfcConf, nameserviceId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }

  final String namenodeId = HAUtil.getNameNodeId(zkfcConf, nameserviceId);
  if (namenodeId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }

  // Expand the generic keys and copy the ZKFC-specific keys for this NN.
  NameNode.initializeGenericKeys(zkfcConf, nameserviceId, namenodeId);
  DFSUtil.setGenericConf(zkfcConf, nameserviceId, namenodeId, ZKFC_CONF_KEYS);
  final NNHAServiceTarget target =
      new NNHAServiceTarget(zkfcConf, nameserviceId, namenodeId);
  return new DFSZKFailoverController(zkfcConf, target);
}
// Resolve the local nameservice and namenode ids, then expand the
// suffixed configuration keys into their generic form.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
 * nameserviceId for namenode is determined based on matching the address with
 * local node's address
 */
@Test
public void getNameNodeNameServiceId() {
  final Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
  final String resolvedNsId = DFSUtil.getNamenodeNameServiceId(conf);
  assertEquals("nn1", resolvedNsId);
}