/**
 * Builds a {@code DFSTestUtil} from the values captured by this builder
 * ({@code nFiles}, {@code maxLevels}, {@code maxSize}, {@code minSize}).
 * NOTE(review): the trailing brace closes the enclosing builder class,
 * which is not visible in this chunk.
 */
public DFSTestUtil build() { return new DFSTestUtil(nFiles, maxLevels, maxSize, minSize); } }
@Test public void testCrcCorruption() throws Exception { // // default parameters // System.out.println("TestCrcCorruption with default parameters"); Configuration conf1 = new HdfsConfiguration(); conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000); DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8*1024); thistest(conf1, util1); // // specific parameters // System.out.println("TestCrcCorruption with specific parameters"); Configuration conf2 = new HdfsConfiguration(); conf2.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17); conf2.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34); DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400); thistest(conf2, util2); }
DFSTestUtil util = new DFSTestUtil("testCorruptFilesCorruptedBlock", 2, 1, 512); util.createFiles(fs, "/srcdat10");
DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles", maxCorruptFileBlocks * 3, 1, 512); util.createFiles(fs, "/srcdat2", (short) 1);
/** check if DFS can handle corrupted blocks properly */ public void testFileCorruption() throws Exception { MiniDFSCluster cluster = null; DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024); try { Configuration conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fs = cluster.getFileSystem(); util.createFiles(fs, "/srcdat"); // Now deliberately remove the blocks File data_dir = new File(System.getProperty("test.build.data"), "dfs/data/data5/current"); assertTrue("data directory does not exist", data_dir.exists()); File[] blocks = data_dir.listFiles(); assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0)); for (int idx = 0; idx < blocks.length; idx++) { if (!blocks[idx].getName().startsWith("blk_")) { continue; } System.out.println("Deliberately removing file "+blocks[idx].getName()); assertTrue("Cannot remove file.", blocks[idx].delete()); } assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat")); util.cleanup(fs, "/srcdat"); } finally { if (cluster != null) { cluster.shutdown(); } } }
cluster.waitActive(); fs = cluster.getFileSystem(); DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024); util.createFiles(fs, "/corruptData", (short) 1); util.waitReplication(fs, "/corruptData", (short) 1);
/**
 * Verifies that running fsck on a non-existent path does not report the
 * filesystem as healthy (the output must not contain
 * {@code NamenodeFsck.HEALTHY_STATUS}).
 *
 * Fix: the close in the finally block previously swallowed its exception
 * with an empty catch; the exception variable is now named and the
 * intentional best-effort close is documented.
 */
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong("dfs.blockreport.intervalMsec", 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    // -1 means the healthy-status marker is absent from the fsck output
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception ignored) {
        // best-effort close during teardown; a close failure must not
        // mask an assertion failure thrown from the try block
      }
    }
    if (cluster != null) { cluster.shutdown(); }
  }
}
final DFSTestUtil util = new DFSTestUtil(getClass().getSimpleName(), 20, 3, 8*1024); final Configuration conf = new HdfsConfiguration(); conf.setLong("dfs.blockreport.intervalMsec", 10000L);
public void testFsckOpenFiles() throws Exception { DFSTestUtil util = new DFSTestUtil("TestFsck", 4, 3, 8*1024); MiniDFSCluster cluster = null; FileSystem fs = null;
public void testFsckMove() throws Exception { DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024); MiniDFSCluster cluster = null; FileSystem fs = null;
cluster.waitActive(); fs = cluster.getFileSystem(); DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024); util.createFiles(fs, "/corruptData");
DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024); MiniDFSCluster cluster = null; FileSystem fs = null;
@Test public void testFinalizedReplicas() throws Exception { // bring up a cluster of 3 Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L); conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); try { // test finalized replicas final String TopDir = "/test"; DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8*1024); util.createFiles(fs, TopDir, (short)3); util.waitReplication(fs, TopDir, (short)3); util.checkFiles(fs, TopDir); cluster.restartDataNodes(); cluster.waitActive(); util.checkFiles(fs, TopDir); } finally { cluster.shutdown(); } }
public void runTests(Configuration conf, boolean serviceTest) throws Exception { MiniDFSCluster cluster = null; DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
MiniDFSCluster cluster = null; FSNamesystem fsn = null; DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 200, 3, 8*1024);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, (5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1), 5 * DFS_BLOCK_SIZE); MiniDFSCluster cluster = null;