Path p = new Path("preadtest.dat");
DFSTestUtil.createFile(fileSys, p, 12 * blockSize, 12 * blockSize, blockSize,
    (short) 3, seed);
pReadFile(fileSys, p);
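// pReadFile is not defined in this excerpt. A minimal sketch of what such a
// helper presumably does: re-derive the expected bytes from the same seed
// (DFSTestUtil.createFile fills the file from a java.util.Random seeded this
// way) and verify them with a positional read, which must not move the file
// pointer. blockSize and seed are assumed to be fields of the enclosing test.
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  try (FSDataInputStream stm = fileSys.open(name)) {
    byte[] expected = new byte[12 * blockSize];
    new Random(seed).nextBytes(expected);
    byte[] actual = new byte[expected.length];
    // readFully(position, ...) is a pread: it leaves the stream offset alone
    stm.readFully(0, actual, 0, actual.length);
    assertArrayEquals("pread returned wrong data", expected, actual);
    // the stream offset should still be at 0 after the positional read
    assertEquals(0, stm.getPos());
  }
}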
private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
    OutputStream out, short replication, int numBlocks) throws IOException {
  for (int i = 0; i < numBlocks; i++) {
    out.write(databuf);
  }
  out.close();
  DFSTestUtil.waitReplication(fileSys, name, replication);
}
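// Hedged usage sketch for the helper above. The stream is created directly at
// the replication factor the helper will later wait for; databuf is assumed
// to be a block-sized byte[] field and blockSize a constant of the test.
FileSystem fileSys = cluster.getFileSystem();
Path name = new Path("/replication-test");
OutputStream out = fileSys.create(name, true, 4096, (short) 1, blockSize);
writeDataAndSetReplication(fileSys, name, out, (short) 1, 2);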
public void run(FileSystem fs) throws IOException {
  DFSTestUtil.appendFile(fs, file1, "new bytes");
}});
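// The fragment above closes an anonymous class. A sketch of the kind of
// callback interface it presumably implements; the interface name and the
// execute() call site below are hypothetical, not taken from the excerpt.
interface FileSystemOp {
  void run(FileSystem fs) throws IOException;
}
// execute(new FileSystemOp() {
//   public void run(FileSystem fs) throws IOException {
//     DFSTestUtil.appendFile(fs, file1, "new bytes");
//   }
// });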
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong("dfs.blockreport.intervalMsec", 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
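// runFsck is not defined in these excerpts (here and in the @Test variant
// further below). A sketch of a typical implementation, assuming it wraps
// org.apache.hadoop.hdfs.tools.DFSck and captures its output; treat this as
// an illustration, not necessarily the exact helper used by these tests.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  // DFSck is the Tool behind the "hdfs fsck" command
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}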
Path excludeFile = new Path(dir, "exclude");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
Block b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
BlockLocation[] locs = fs.getFileBlockLocations(
    fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
// exclude the datanode holding the first replica, then decommission it
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
ns.refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
  cluster.shutdown();
}
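// DFSTestUtil.waitForDecommission blocks until the named datanode reports as
// decommissioned. A rough sketch of the polling loop it presumably runs (an
// assumption; details such as timeouts and the address accessor --
// getXferAddr() here, getName() in older releases -- may differ):
DatanodeInfo dn = null;
do {
  Thread.sleep(1000);
  dn = null;
  for (DatanodeInfo info : ((DistributedFileSystem) fs).getDataNodeStats()) {
    if (name.equals(info.getXferAddr())) {
      dn = info;
    }
  }
} while (dn == null || dn.isDecommissionInProgress()
    || !dn.isDecommissioned());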
private void createFile(long fileLen, short replicationFactor)
    throws IOException {
  FileSystem fs = cluster.getFileSystem();
  DFSTestUtil.createFile(fs, filePath, fileLen, replicationFactor,
      r.nextLong());
  DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
}
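// Hedged usage sketch: create a two-block file at replication 3 and block
// until every block reports three live replicas (BLOCK_SIZE is assumed to be
// a constant of the enclosing test, and the cluster to have >= 3 datanodes).
createFile(2 * BLOCK_SIZE, (short) 3);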
private int changeData7(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path foo2 = new Path(dir, "foo2");
  final Path foo_f1 = new Path(foo, "f1");
  final Path foo2_f2 = new Path(foo2, "f2");
  final Path foo_d1 = new Path(foo, "d1");
  final Path foo_d1_f3 = new Path(foo_d1, "f3");
  int numCreatedModified = 0;
  dfs.rename(foo, foo2);
  DFSTestUtil.createFile(dfs, foo_f1, BLOCK_SIZE, DATA_NUM, 0L);
  numCreatedModified += 2; // create ./foo and ./foo/f1
  DFSTestUtil.appendFile(dfs, foo_f1, (int) BLOCK_SIZE);
  dfs.rename(foo_f1, foo2_f2);
  numCreatedModified -= 1; // mv ./foo/f1
  numCreatedModified += 2; // "M ./foo" and "+ ./foo/f2"
  DFSTestUtil.createFile(dfs, foo_d1_f3, BLOCK_SIZE, DATA_NUM, 0L);
  numCreatedModified += 2; // create ./foo/d1 and ./foo/d1/f3
  return numCreatedModified;
}
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
util.createFiles(fs, "/srcdat", replFactor);
util.waitReplication(fs, "/srcdat", (short)2);
assertTrue(util.checkFiles(fs, "/srcdat"));
System.out.println("All File still have a valid replica");
util.setReplication(fs, "/srcdat", (short)1);
System.out.println("The excess-corrupted-replica test is disabled " +
    " pending HADOOP-1557");
util.cleanup(fs, "/srcdat");
} finally {
  if (cluster != null) {
    cluster.shutdown();
  }
}
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** check if DFS can handle corrupted blocks properly */
public void testFileCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    // Now deliberately remove the blocks
    File data_dir = new File(System.getProperty("test.build.data"),
        "dfs/data/data5/current");
    assertTrue("data directory does not exist", data_dir.exists());
    File[] blocks = data_dir.listFiles();
    assertTrue("Blocks do not exist in data-dir",
        (blocks != null) && (blocks.length > 0));
    for (int idx = 0; idx < blocks.length; idx++) {
      if (!blocks[idx].getName().startsWith("blk_")) {
        continue;
      }
      System.out.println("Deliberately removing file " +
          blocks[idx].getName());
      assertTrue("Cannot remove file.", blocks[idx].delete());
    }
    assertTrue("Corrupted replicas not handled properly.",
        util.checkFiles(fs, "/srcdat"));
    util.cleanup(fs, "/srcdat");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8*1024);
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
setName("testCorruptFilesCorruptedBlock").setNumFiles(2). setMaxLevels(1).setMaxSize(512).build(); util.createFiles(fs, "/srcdat10"); util.checkFiles(fs, "/srcdat10"); } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1); util.cleanup(fs, "/srcdat10"); } finally { if (cluster != null) { cluster.shutdown(); }
@Test(expected = IOException.class)
public void testBackupPathIsNotAccessible() throws Exception {
  Path path = new Path(PERMISSION_TEST_PATH);
  FileSystem rootFs =
      FileSystem.get(TEST_UTIL.getConnection().getConfiguration());
  rootFs.mkdirs(path.getParent());
  rootFs.setPermission(path.getParent(),
      FsPermission.createImmutable((short) 000));
  FileSystem fs = DFSTestUtil.getFileSystemAs(DIANA,
      TEST_UTIL.getConnection().getConfiguration());
  fs.mkdirs(path);
  }
}
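// DFSTestUtil.getFileSystemAs returns a FileSystem bound to the given test
// user (DIANA above is assumed to be a UserGroupInformation created with
// createUserForTesting). A minimal sketch of what it presumably does, a doAs
// wrapper around FileSystem.get; an assumption, not the verbatim helper:
public static FileSystem getFileSystemAs(UserGroupInformation ugi,
    final Configuration conf) throws IOException, InterruptedException {
  return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
}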
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);
  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);
  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Map<String, String []> u2g_map = new HashMap<String, String []>(1); u2g_map.put(fakeUsername, new String[] {fakeGroup}); DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map); new String [] { fakeGroup}); FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
@Before
public void setup() throws Exception {
  Configuration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").
      getAbsolutePath();
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();
  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile,
      StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR,
      fs.getUri().toString() + testFile.toString());
}
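// A hedged sanity check to follow the setup above: DFSTestUtil.readFileBuffer
// reads the whole file back as bytes, so the striped write can be verified by
// length. The check itself is an illustration, not part of the original setup.
byte[] readBack = DFSTestUtil.readFileBuffer(fs, testFile);
assertEquals(BLOCKSIZE, readBack.length);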
private static void testViewingFile(MiniDFSCluster cluster, String filePath,
    boolean doTail) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  Path testPath = new Path(filePath);
  if (!fs.exists(testPath)) {
    DFSTestUtil.writeFile(fs, testPath, FILE_DATA);
  }
  InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
  int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
  String jspName = doTail ? "tail.jsp" : "browseDirectory.jsp";
  String fileParamName = doTail ? "filename" : "dir";
  URL url = new URL("http://localhost:" + dnInfoPort + "/" + jspName
      + JspHelper.getUrlParam(fileParamName,
          URLEncoder.encode(testPath.toString(), "UTF-8"), true)
      + JspHelper.getUrlParam("namenodeInfoPort",
          Integer.toString(nnHttpAddress.getPort())));
  String viewFilePage = DFSTestUtil.urlGet(url);
  assertTrue("page should show preview of file contents",
      viewFilePage.contains(FILE_DATA));
  if (!doTail) {
    assertTrue("page should show link to download file", viewFilePage
        .contains("/streamFile" + URIUtil.encodePath(testPath.toString())));
  }
}
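// DFSTestUtil.urlGet fetches a URL into a String for assertions like the ones
// above. A minimal sketch of what it presumably does (an assumption about a
// helper not shown in this excerpt):
public static String urlGet(URL url) throws IOException {
  URLConnection conn = url.openConnection();
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  // org.apache.hadoop.io.IOUtils: copy the response body and close the stream
  IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
  return out.toString();
}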
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  conf.setClass("hbase.regionserver.hlog.writer.impl",
      InstrumentedLogWriter.class, Writer.class);
  // This is how you turn off shortcircuit read currently. TODO: Fix. Should
  // read config.
  System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
  // Create a fake user-to-group mapping and set it on the conf.
  Map<String, String[]> u2g_map = new HashMap<>(2);
  ROBBER = User.getCurrent().getName() + "-robber";
  ZOMBIE = User.getCurrent().getName() + "-zombie";
  u2g_map.put(ROBBER, GROUP);
  u2g_map.put(ZOMBIE, GROUP);
  DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
  conf.setInt("dfs.heartbeat.interval", 1);
  TEST_UTIL.startMiniDFSCluster(2);
}