/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(StartupOption operation, String searchString) { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { if (!expected.getMessage().contains(searchString)) { fail("Expected substring '" + searchString + "' in exception " + "but got: " + StringUtils.stringifyException(expected)); } // expected } }
@Test // Upgrade from LDir-based layout to block ID-based layout -- change described // in HDFS-6482 public void testUpgradeToIdBasedLayout() throws IOException { TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage(); upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT); Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, System.getProperty("test.build.data") + File.separator + "dfs" + File.separator + "data"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, System.getProperty("test.build.data") + File.separator + "dfs" + File.separator + "name"); upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1) .manageDataDfsDirs(false).manageNameDfsDirs(false), null); } }
cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false) .manageNameDfsDirs(false) .build();
.manageDataDfsDirs(false) .manageNameDfsDirs(false).build(); cluster.waitActive();
FSDataOutputStream out = null; try { cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true) .manageNameDfsDirs(true).numDataNodes(4) .racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" })
.manageDataDfsDirs(false) .manageNameDfsDirs(false) .numDataNodes(0)
cluster = new MiniDFSCluster.Builder(conf) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(StartupOption.REGULAR)
.startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false)
/** * start with -importCheckpoint option and verify that the files are in separate directories and of the right length * @throws IOException */ private void checkNameNodeFiles() throws IOException{ // start namenode with import option LOG.info("-- about to start DFS cluster"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(IMPORT).build(); cluster.waitActive(); LOG.info("--NN started with checkpoint option"); NameNode nn = cluster.getNameNode(); assertNotNull(nn); // Verify that image file sizes did not change. FSImage image = nn.getFSImage(); verifyDifferentDirs(image, this.fsimageLength, this.editsLength); } finally { if(cluster != null) cluster.shutdown(); } }
/** * start with -importCheckpoint option and verify that the files are in separate directories and of the right length * @throws IOException */ private void checkNameNodeFiles() throws IOException{ // start namenode with import option LOG.info("-- about to start DFS cluster"); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .startupOption(IMPORT).build(); cluster.waitActive(); LOG.info("--NN started with checkpoint option"); NameNode nn = cluster.getNameNode(); assertNotNull(nn); // Verify that image file sizes did not change. FSImage image = nn.getFSImage(); verifyDifferentDirs(image, this.fsimageLength, this.editsLength); } finally { if(cluster != null) cluster.shutdown(); } }
/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(String searchString) { try { NameNode.doRollback(conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { if (!expected.getMessage().contains(searchString)) { fail("Expected substring '" + searchString + "' in exception " + "but got: " + StringUtils.stringifyException(expected)); } // expected } }
@Test(timeout=30000) public void testCorruptImageFallback() throws IOException { // Create two checkpoints createCheckPoint(2); // Delete a single md5sum corruptFSImageMD5(false); // Should still be able to start MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); try { cluster.waitActive(); } finally { cluster.shutdown(); } }
private FileChecksum writeUnencryptedAndThenRestartEncryptedCluster()
    throws IOException {
  // Phase 1: write plaintext data on an unencrypted cluster and record
  // the file's checksum.
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = getFileSystem(conf);
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  final FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
  fs.close();
  cluster.shutdown();
  // Phase 2: enable encryption and restart (no format) over the same
  // storage directories.
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .format(false)
      .startupOption(StartupOption.REGULAR)
      .build();
  fs = getFileSystem(conf);
  return checksum;
}
/** * Attempts to start a NameNode with the given operation. Starting * the NameNode should throw an exception. */ void startNameNodeShouldFail(StartupOption operation) { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .startupOption(operation) .format(false) .manageDataDfsDirs(false) .manageNameDfsDirs(false) .build(); // should fail throw new AssertionError("NameNode should have failed to start"); } catch (Exception expected) { // expected } }
/**
 * Test upgrade from a 1.x image with some blocksBeingWritten.
 */
@Test
public void testUpgradeFromRel1BBWImage() throws IOException {
  unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
  Configuration conf = new Configuration(upgradeConf);
  final String dataDir = System.getProperty("test.build.data")
      + File.separator + "dfs"
      + File.separator + "data"
      + File.separator + "data1";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  upgradeAndVerify(new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .enableManagedDfsDirsRedundancy(false)
      .manageDataDfsDirs(false), null);
}
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files.
 */
private MiniDFSCluster createCluster() throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .startupOption(StartupOption.UPGRADE);
  return builder.build();
}
/**
 * Runs an upgrade on a single-datanode cluster whose storage directories
 * are laid out by the test rather than by MiniDFSCluster, then verifies it.
 */
private static void upgradeAndVerify(final TestDFSUpgradeFromImage upgrade,
    final Configuration conf, final ClusterVerifier verifier)
    throws IOException {
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  upgrade.upgradeAndVerify(builder, verifier);
}
/**
 * Restarts (without formatting) a cluster over externally managed
 * storage directories, with no datanodes.
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  return new MiniDFSCluster.Builder(c)
      .numDataNodes(0)
      .startupOption(StartupOption.REGULAR)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .build();
}
/**
/**
 * Builds a zero-datanode cluster from existing (unformatted, unmanaged)
 * storage directories using a regular startup.
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  MiniDFSCluster.Builder restartBuilder = new MiniDFSCluster.Builder(c)
      .numDataNodes(0)
      .startupOption(StartupOption.REGULAR)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  return restartBuilder.build();
}
/**
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files.
 */
private MiniDFSCluster createCluster() throws IOException {
  // Zero datanodes; the test supplies the (unformatted) storage dirs and
  // the NameNode starts in UPGRADE mode.
  return new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
}