Codota Logo
HdfsServerConstants$StartupOption
Code IndexAdd Codota to your IDE (free)

How to use
HdfsServerConstants$StartupOption
in
org.apache.hadoop.hdfs.server.common

Best Java code snippets using org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption (Showing top 20 results out of 315)

  • Common ways to obtain HdfsServerConstants$StartupOption
private void myMethod () {
HdfsServerConstants$StartupOption h =
  • Codota IconHdfsServerConstants.StartupOption hdfsServerConstantsStartupOption;String value;hdfsServerConstantsStartupOption.getEnum(value)
  • Codota IconHdfsServerConstants.StartupOption hdfsServerConstantsStartupOption;String str;hdfsServerConstantsStartupOption.valueOf(str)
  • Codota IconHdfsServerConstants.StartupOption hdfsServerConstantsStartupOption;Matcher matcher;hdfsServerConstantsStartupOption.valueOf(matcher.group(int1))
  • Smart code suggestions by Codota
}
origin: org.apache.hadoop/hadoop-hdfs

String clusterId = StartupOption.FORMAT.getClusterId();
if(clusterId == null || clusterId.equals("")) {
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reads the DataNode startup option from the configuration.
 * Falls back to {@link StartupOption#REGULAR} when the key is unset.
 */
static StartupOption getStartupOption(Configuration conf) {
  final String configured =
      conf.get(DFS_DATANODE_STARTUP_KEY, StartupOption.REGULAR.toString());
  return StartupOption.getEnum(configured);
}
origin: org.apache.hadoop/hadoop-hdfs

for(int i=0; i < argsLen; i++) {
 String cmd = args[i];
 if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
  startOpt = StartupOption.FORMAT;
  for (i = i + 1; i < argsLen; i++) {
   if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
    i++;
    if (i >= argsLen) {
       + StartupOption.CLUSTERID.getName() + " flag");
     return null;
      clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) ||
      clusterId.equalsIgnoreCase(
        StartupOption.NONINTERACTIVE.getName())) {
     LOG.error("Must specify a valid cluster ID after the "
       + StartupOption.CLUSTERID.getName() + " flag");
     return null;
    startOpt.setClusterId(clusterId);
   if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
    startOpt.setForceFormat(true);
   if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
    startOpt.setInteractiveFormat(false);
 } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
  startOpt = StartupOption.GENCLUSTERID;
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Renders this option; for ROLLINGUPGRADE the active sub-option is
 * appended in parentheses, e.g. {@code ROLLINGUPGRADE(STARTED)}.
 */
@Override
public String toString() {
  if (this != ROLLINGUPGRADE) {
    return super.toString();
  }
  return super.toString() + "(" + getRollingUpgradeStartupOption() + ")";
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Prepares a clean name directory and a fresh Configuration before each
 * test, and resets the FORMAT option's global flags to their defaults.
 *
 * @throws IOException if a stale test directory cannot be removed
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  hdfsDir = new File(PathUtils.getTestDirName(getClass()), "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // Other tests may flip these shared FORMAT flags; restore the defaults
  // so every test starts from the same state.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);

  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Asserts that parsing {@code value} yields {@code expectedOption}; when a
 * RollingUpgradeStartupOption is specified then it is also checked against
 * the parsed option's sub-option.
 *
 * @param value the startup option string to parse
 * @param expectedOption the expected parse result
 * @param expectedRollupOption optional, may be null.
 */
private static void verifyStartupOptionResult(String value,
    StartupOption expectedOption,
    RollingUpgradeStartupOption expectedRollupOption) {
  final StartupOption parsed = StartupOption.getEnum(value);
  assertEquals(expectedOption, parsed);
  if (expectedRollupOption == null) {
    return;
  }
  assertEquals(expectedRollupOption, parsed.getRollingUpgradeStartupOption());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Upgrade from one Federation version to another where the startup option
 * carries a cluster id that differs from the one already in storage:
 * the existing cluster id must be reused and the supplied one ignored.
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFromFederationWithWrongCID()
    throws Exception {
  final String existingCID = "currentcid";
  startOpt.setClusterId("wrong-cid");
  storage.setClusterID(existingCID);
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one",
    existingCID, storage.getClusterID());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Upgrade from the 0.22 layout to a Federation version while the user
 * supplies a cluster id (-upgrade -clusterid &lt;cid&gt;): the user-given
 * cluster id must be adopted.
 *
 * @throws Exception
 */
@Test
public void testStartupOptUpgradeFrom22WithCID() throws Exception {
  final String suppliedCID = "cid";
  startOpt.setClusterId(suppliedCID);
  layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the given clusterid",
    suppliedCID, storage.getClusterID());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Stores the information related to a namenode in the cluster
 */
public static class NameNodeInfo {
  final NameNode nameNode;      // the running NameNode instance
  final Configuration conf;     // configuration the node was created with
  final String nameserviceId;   // nameservice this node belongs to
  final String nnId;            // node identifier within the nameservice
  StartupOption startOpt;       // mutable; replaceable via setStartOpt

  NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
      StartupOption startOpt, Configuration conf) {
    this.conf = conf;
    this.nameNode = nn;
    this.nnId = nnId;
    this.nameserviceId = nameserviceId;
    this.startOpt = startOpt;
  }

  /** Replaces the startup option recorded for this namenode. */
  public void setStartOpt(StartupOption startOpt) {
    this.startOpt = startOpt;
  }
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Formatting behaviour with respect to the cluster-id option: a format
 * without an id generates one, a format with an id adopts it verbatim,
 * and a later format with an empty id generates a fresh one.
 */
@Test
public void testFormatClusterIdOption() throws IOException {
  // 1. Format without a cluster id: a non-empty id must be generated.
  NameNode.format(config);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));

  // 2. Format with an explicit cluster id: that id must be used.
  StartupOption.FORMAT.setClusterId("mycluster");
  NameNode.format(config);
  cid = getClusterId(config);
  assertTrue("ClusterId didn't match", cid.equals("mycluster"));

  // 3. Format again with an empty cluster id: a new id must be generated.
  StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  String newCid = getClusterId(config);
  assertFalse("ClusterId should not be the same", newCid.equals(cid));
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Starts a BackupNode over a copy of {@code conf}, pointing its name and
 * edits directories at a per-index test location and binding its RPC/HTTP
 * endpoints to ephemeral localhost ports. Asserts the node comes up in
 * safe mode and standby state before returning it.
 */
BackupNode startBackupNode(Configuration conf,
               StartupOption startupOpt,
               int idx) throws IOException {
  final Configuration c = new HdfsConfiguration(conf);
  final String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  // Edits dir mirrors the name dir via variable expansion.
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
    "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  // Port 0 lets the OS pick free ports on loopback.
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "127.0.0.1:0");

  final BackupNode bn = (BackupNode) NameNode.createNameNode(
    new String[] {startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
      bn.getNamesystem().getHAState()
        .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Recreate a clean directory tree for the backup node.
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  final File nameDir = new File(hdfsDir, "name2");
  assertTrue(nameDir.mkdirs());
  assertTrue(new File(nameDir, "current").mkdirs());
  assertTrue(new File(nameDir, "image").mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
    fileAsURI(new File(hdfsDir, "name2")).toString());
  // Edits dir mirrors the name dir via variable expansion.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
    "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Launch with the -backup startup option.
  final String[] args = {StartupOption.BACKUP.getName()};
  return (BackupNode) NameNode.createNameNode(args, conf);
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int consumed = 0;
  if (args != null && args.length != 0) {
    final String cmd = args[consumed++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
        "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }
  setStartupOption(conf, startOpt);
  // Fail if more than one cmd specified!
  return args == null || consumed == args.length;
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Builds the per-node test directory path from the startup role and index. */
static String getBackupNodeDir(StartupOption t, int idx) {
  final String dirName = "name" + t.getName() + idx;
  return BASE_DIR + dirName + "/";
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;
  final boolean hasArgs = args != null && args.length != 0;
  if (hasArgs) {
    final String cmd = args[i++];
    final boolean rackFlag =
        "-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd);
    if (rackFlag) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
        "resolution is handled by the NameNode.");
      return false;
    }
    if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }
  setStartupOption(conf, startOpt);
  return (args == null || i == args.length);  // Fail if more than one cmd specified!
}
origin: io.prestosql.hadoop/hadoop-apache

/** Returns the command-line form: the rolling upgrade flag followed by this sub-option in lower case. */
public String getOptionString() {
  return String.join(" ",
      StartupOption.ROLLINGUPGRADE.getName(), StringUtils.toLowerCase(name()));
}
origin: ch.cern.hadoop/hadoop-hdfs

StartupOption.FORMAT.setClusterId("DifferentCID");
cluster.addNameNode(conf, 9948);
NameNode nn4 = cluster.getNameNode(3);
origin: io.prestosql.hadoop/hadoop-apache

/** True only for a ROLLINGUPGRADE option whose sub-option is this constant. */
public boolean matches(StartupOption option) {
  if (option != StartupOption.ROLLINGUPGRADE) {
    return false;
  }
  return option.getRollingUpgradeStartupOption() == this;
}
origin: io.prestosql.hadoop/hadoop-apache

 if (startOpt.getClusterId() == null) {
  startOpt.setClusterId(newClusterID());
 setClusterID(startOpt.getClusterId());
 setBlockPoolID(newBlockPoolID());
} else {
 if (startOpt.getClusterId() != null
   && !startOpt.getClusterId().equals(getClusterID())) {
  LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
    + ", Ignoring given clusterid: " + startOpt.getClusterId());
origin: ch.cern.hadoop/hadoop-hdfs

/** Builds the command-line representation: rolling upgrade flag plus lower-cased sub-option name. */
public String getOptionString() {
  return new StringBuilder(StartupOption.ROLLINGUPGRADE.getName())
      .append(" ")
      .append(StringUtils.toLowerCase(name()))
      .toString();
}
org.apache.hadoop.hdfs.server.commonHdfsServerConstants$StartupOption

Javadoc

Startup options

Most used methods

  • getClusterId
  • getEnum
  • getName
  • getRollingUpgradeStartupOption
  • setClusterId
  • setForceFormat
  • setInteractiveFormat
  • createRecoveryContext
  • getForce
  • getForceFormat
  • getInteractiveFormat
  • name
  • getInteractiveFormat,
  • name,
  • setForce,
  • setRollingUpgradeStartupOption,
  • toNodeRole,
  • toString,
  • valueOf

Popular in Java

  • Running tasks concurrently on multiple threads
  • findViewById (Activity)
  • setContentView (Activity)
  • addToBackStack (FragmentTransaction)
  • Graphics2D (java.awt)
    This Graphics2D class extends the Graphics class to provide more sophisticated control over graphics rendering, geometry, and text layout.
  • HashMap (java.util)
    HashMap is an implementation of Map. All optional operations are supported. All elements are permitted as keys or values, including null.
  • BlockingQueue (java.util.concurrent)
    A java.util.Queue that additionally supports operations that wait for the queue to become non-empty
  • Handler (java.util.logging)
    A Handler object accepts a logging request and exports the desired messages to a target, for example
  • ImageIO (javax.imageio)
  • HttpServletRequest (javax.servlet.http)
    Extends the javax.servlet.ServletRequest interface to provide request information for HTTP servlets.
Codota Logo
  • Products

    Search for Java codeSearch for JavaScript codeEnterprise
  • IDE Plugins

    IntelliJ IDEAWebStormAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogCodota Academy Plugin user guide Terms of usePrivacy policyJava Code IndexJavascript Code Index
Get Codota for your IDE now