/**
 * Sets up a MiniDFSCluster, configures it to create one edits file,
 * starts DelegationTokenSecretManager (to get security op codes)
 *
 * @param dfsDir DFS directory (where to setup MiniDFS cluster)
 * @throws IOException if the cluster fails to start
 */
public void startCluster(String dfsDir) throws IOException {
  // same as manageDfsDirs but only one edits file instead of two
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "namesecondary1")).toString());
  // blocksize for concat (file size must be multiple of blocksize)
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  // for security to work (fake JobTracker user)
  // NOTE(review): the two rule strings are concatenated with no separator,
  // yielding "...s/@.*//DEFAULT" — presumably intentional upstream; confirm
  // against the auth_to_local rule parser before changing.
  config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  // always issue delegation tokens so the security op codes appear in edits
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // manageNameDfsDirs(false): use the name/checkpoint dirs configured above
  cluster =
      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
  cluster.waitClusterUp();
}
/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(StartupOption operation, String searchString) { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { if (!expected.getMessage().contains(searchString)) { fail("Expected substring '" + searchString + "' in exception " + "but got: " + StringUtils.stringifyException(expected)); } // expected } }
@Test // Upgrade from LDir-based layout to block ID-based layout -- change described // in HDFS-6482 public void testUpgradeToIdBasedLayout() throws IOException { TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage(); upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT); Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, System.getProperty("test.build.data") + File.separator + "dfs" + File.separator + "data"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, System.getProperty("test.build.data") + File.separator + "dfs" + File.separator + "name"); upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1) .manageDataDfsDirs(false).manageNameDfsDirs(false), null); } }
cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false) .numDataNodes(0).build(); NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
.manageNameDfsDirs(false) .build(); cluster.shutdown(); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) .manageNameDfsDirs(false) .format(false) .build();
.manageNameDfsDirs(false) .waitSafeMode(false) .build();
@Test (timeout = 30000) public void testLogAndRestart() throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI("myjournal").toString()); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) .manageNameDfsDirs(false) .build(); try { cluster.getFileSystem().mkdirs(TEST_PATH); // Restart the NN and make sure the edit was persisted // and loaded again cluster.restartNameNode(); assertTrue(cluster.getFileSystem().exists(TEST_PATH)); cluster.getFileSystem().mkdirs(TEST_PATH_2); // Restart the NN again and make sure both edits are persisted. cluster.restartNameNode(); assertTrue(cluster.getFileSystem().exists(TEST_PATH)); assertTrue(cluster.getFileSystem().exists(TEST_PATH_2)); } finally { cluster.shutdown(); } }
/** * start with -importCheckpoint option and verify that the files are in separate directories and of the right length * @throws IOException */ private void checkNameNodeFiles() throws IOException{ // start namenode with import option LOG.info("-- about to start DFS cluster"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(IMPORT).build(); cluster.waitActive(); LOG.info("--NN started with checkpoint option"); NameNode nn = cluster.getNameNode(); assertNotNull(nn); // Verify that image file sizes did not change. FSImage image = nn.getFSImage(); verifyDifferentDirs(image, this.fsimageLength, this.editsLength); } finally { if(cluster != null) cluster.shutdown(); } }
/** * start with -importCheckpoint option and verify that the files are in separate directories and of the right length * @throws IOException */ private void checkNameNodeFiles() throws IOException{ // start namenode with import option LOG.info("-- about to start DFS cluster"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(IMPORT).build(); cluster.waitActive(); LOG.info("--NN started with checkpoint option"); NameNode nn = cluster.getNameNode(); assertNotNull(nn); // Verify that image file sizes did not change. FSImage image = nn.getFSImage(); verifyDifferentDirs(image, this.fsimageLength, this.editsLength); } finally { if(cluster != null) cluster.shutdown(); } }
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    // Only reached when build() unexpectedly SUCCEEDED. That is acceptable
    // solely on filesystems where locking is unsupported, so assert the lock
    // is NOT supported; otherwise fail with the message below.
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    // Expected path on lock-supporting filesystems: startup must report the
    // directory as already locked.
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    // Release our lock so later tests can reuse the directory.
    sdToLock.unlock();
  }
}
/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(String searchString) { try { NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { if (!expected.getMessage().contains(searchString)) { fail("Expected substring '" + searchString + "' in exception " + "but got: " + StringUtils.stringifyException(expected)); } // expected } }
@Test(timeout=30000) public void testCorruptImageFallback() throws IOException { // Create two checkpoints createCheckPoint(2); // Delete a single md5sum corruptFSImageMD5(false); // Should still be able to start MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); try { cluster.waitActive(); } finally { cluster.shutdown(); } }
/**
 * Writes test data to an unencrypted cluster, records the file's checksum,
 * then restarts the same cluster (same storage dirs, no reformat) with
 * encryption config keys enabled.
 *
 * @return the checksum computed before encryption was enabled
 * @throws IOException on cluster or filesystem failure
 */
private FileChecksum writeUnencryptedAndThenRestartEncryptedCluster()
    throws IOException {
  // Phase 1: plain cluster — write and checksum the test file.
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = getFileSystem(conf);
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  FileChecksum unencryptedChecksum = fs.getFileChecksum(TEST_PATH);
  fs.close();
  cluster.shutdown();

  // Phase 2: restart on the same dirs with encryption enabled.
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .format(false)
      .startupOption(StartupOption.REGULAR)
      .build();
  fs = getFileSystem(conf);
  return unencryptedChecksum;
}
/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(StartupOption operation) { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { // expected } }
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files
 *
 * @return a started cluster in UPGRADE mode over pre-existing storage
 * @throws IOException if the cluster fails to start
 */
private MiniDFSCluster createCluster() throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .startupOption(StartupOption.UPGRADE);
  return builder.build();
}
/**
 * Runs the upgrade over unmanaged (pre-existing) storage directories and
 * verifies the result with the supplied verifier.
 *
 * @param upgrade the upgrade harness owning the unpacked storage
 * @param conf cluster configuration pointing at that storage
 * @param verifier post-upgrade checks, may be null
 * @throws IOException if the upgrade or verification fails
 */
private static void upgradeAndVerify(final TestDFSUpgradeFromImage upgrade,
    final Configuration conf, final ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  upgrade.upgradeAndVerify(builder, verifier);
}
/**
 * Starts a cluster over existing, unmanaged storage directories using a
 * REGULAR (non-upgrade) startup with no datanodes.
 *
 * @param c the configuration to build the cluster from
 * @return the started cluster
 * @throws IOException if the cluster fails to start
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(c)
      .numDataNodes(0)
      .startupOption(StartupOption.REGULAR)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  return builder.build();
}
/**
/**
 * Starts a cluster over existing, unmanaged storage directories using a
 * REGULAR (non-upgrade) startup with no datanodes.
 *
 * @param c the configuration to build the cluster from
 * @return the started cluster
 * @throws IOException if the cluster fails to start
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(c)
      .numDataNodes(0)
      .startupOption(StartupOption.REGULAR)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  return builder.build();
}
/**
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files
 *
 * @return a started cluster in UPGRADE mode over pre-existing storage
 * @throws IOException if the cluster fails to start
 */
private MiniDFSCluster createCluster() throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .startupOption(StartupOption.UPGRADE);
  return builder.build();
}
/**
 * Starts a zero-datanode mini cluster and caches its filesystem in the
 * {@code fs} field. Exit-on-shutdown checking is disabled so tests may
 * deliberately crash the NameNode.
 *
 * @param conf the configuration to build the cluster from
 * @param manageNameDfsDirs whether the cluster manages its own name dirs
 * @throws IOException if the cluster fails to start
 */
public void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
    throws IOException {
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .manageNameDfsDirs(manageNameDfsDirs)
      .checkExitOnShutdown(false)
      .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}