/**
 * Checks to see if the specified file system is available.
 *
 * <p>Only {@link DistributedFileSystem} instances are probed; any other
 * file system type is assumed to be available. The probe is a cheap
 * {@code exists("/")} call; on failure the file system is closed
 * (best-effort) and an {@link IOException} is thrown with the original
 * failure as its cause.
 *
 * @param fs filesystem to probe
 * @throws IOException if the file system is not available
 */
public static void checkFileSystemAvailable(final FileSystem fs) throws IOException {
  if (!(fs instanceof DistributedFileSystem)) {
    return;
  }
  IOException exception = null;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    if (dfs.exists(new Path("/"))) {
      return;
    }
  } catch (IOException e) {
    // Unwrap server-side exceptions so the caller sees the real cause.
    exception = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
  }
  // Best-effort close; the file system is unusable at this point anyway.
  try {
    fs.close();
  } catch (Exception e) {
    LOG.error("file system close failed: ", e);
  }
  // Prefer the (message, cause) constructor over initCause() — same effect,
  // clearer intent, and works even when exception is null.
  throw new IOException("File system is not available", exception);
}
/**
 * Prepares a fresh WAL test fixture: configuration, root/log directories, and a
 * WALFactory named after the running test method. Any pre-existing HBase root
 * directory is wiped so each test starts from a clean slate.
 */
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // Hoist the repeated cluster file-system lookup into one local.
  final FileSystem clusterFs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.fs = clusterFs;
  this.hbaseRootDir = FSUtils.getRootDir(this.conf);
  this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  final String serverName =
      ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010,
          System.currentTimeMillis()).toString();
  this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
  this.logDir = new Path(this.hbaseRootDir, logName);
  // Start clean: recursively remove any leftover root dir from a prior run.
  if (clusterFs.exists(this.hbaseRootDir)) {
    clusterFs.delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, currentTest.getMethodName());
}
/**
 * Test that we can start and stop a cluster multiple times with the same
 * HBaseTestingUtility, and that state (here, a file in DFS) does not survive
 * a shutdown/restart cycle.
 */
@Test
public void testMultipleStartStop() throws Exception {
  HBaseTestingUtility htu1 = new HBaseTestingUtility();
  Path foo = new Path("foo");

  htu1.startMiniCluster();
  // close() the stream returned by create() — the original leaked it.
  htu1.getDFSCluster().getFileSystem().create(foo).close();
  assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.shutdownMiniCluster();

  // A fresh cluster must not see files from the previous incarnation.
  htu1.startMiniCluster();
  assertFalse(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.getDFSCluster().getFileSystem().create(foo).close();
  assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.shutdownMiniCluster();
}
@Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); // this.cluster = TEST_UTIL.getDFSCluster(); this.fs = TEST_UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = FSUtils.getRootDir(conf); this.hbaseWALRootDir = FSUtils.getWALRootDir(conf); this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME); String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010, System.currentTimeMillis()).toString(); this.logDir = new Path(this.hbaseWALRootDir, AbstractFSWALProvider.getWALDirectoryName(serverName)); if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); } if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); } this.wals = new WALFactory(conf, serverName); }
/**
 * Checks whether the file identified by the given file id exists in the
 * distributed file system.
 *
 * @param path file id to resolve into a path
 * @return {@code true} if the file exists; {@code false} if it does not or
 *         if the existence check fails (the failure is logged, not thrown)
 */
@Override
public boolean isFileExist(long path) {
  Path filePath = DFSUtilClient.makePathFromFileId(path);
  try {
    return nnc.getDistributedFileSystem().exists(filePath);
  } catch (IllegalArgumentException | IOException e) {
    // Fixed garbled message ("getting file is for") in the original.
    LOG.warn("Exception while checking existence of file for the given path:{}", filePath, e);
  }
  return false;
}
// Verify the expected output file was produced before attempting to read it.
assertTrue(fs.exists(outputFilePath));
FSDataInputStream fsDataInputStream = fs.open(outputFilePath);
// NOTE(review): InputStreamReader without an explicit charset uses the
// platform default — presumably the output is UTF-8; confirm and consider
// passing StandardCharsets.UTF_8 explicitly.
BufferedReader reader = new BufferedReader(new InputStreamReader(fsDataInputStream));
// Only proceed when the id file already exists (body continues beyond this chunk).
if (fs.exists(idPath)) {
/**
 * Polls the namenode for the renamed snapshot root, retrying up to
 * CHECKTIMES times with a one-second pause between probes.
 *
 * @return true once the renamed snapshot is visible, false if it never appears
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  final Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(new Path(dir), newName);
  boolean renamed = dfs.exists(snapshotRoot);
  int attempts = 0;
  while (!renamed && attempts < CHECKTIMES) {
    Thread.sleep(1000);
    renamed = dfs.exists(snapshotRoot);
    attempts++;
  }
  return renamed;
}
/** Ensures the target file exists, creating it (and counting the update) if absent. */
@Override
void prepare() throws Exception {
  final Path targetPath = new Path(target);
  if (dfs.exists(targetPath)) {
    return;
  }
  expectedUpdateCount++;
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
}
/** Ensures the half-block test file exists before the operation runs. */
@Override
void prepare() throws Exception {
  final Path testFile = new Path(fileName);
  if (dfs.exists(testFile)) {
    return;
  }
  DFSTestUtil.createFile(dfs, testFile, BlockSize / 2, DataNodes, 0);
}
/** Ensures the source (old-name) file exists before the rename is attempted. */
@Override
void prepare() throws Exception {
  final Path sourcePath = new Path(oldName);
  if (dfs.exists(sourcePath)) {
    return;
  }
  DFSTestUtil.createFile(dfs, sourcePath, BlockSize, DataNodes, 0);
}
/** Ensures the old-name file is present, creating a full-block file if missing. */
@Override
void prepare() throws Exception {
  final Path existing = new Path(oldName);
  if (!dfs.exists(existing)) {
    DFSTestUtil.createFile(dfs, existing, BlockSize, DataNodes, 0);
  }
}
/**
 * Removes the /sub1 test tree: deletes all of its snapshots first (snapshotted
 * directories cannot be deleted), then disallows snapshots and deletes the tree.
 */
@After
public void tearDown() throws IOException {
  final Path sub1 = new Path("/sub1");
  if (!fs.exists(sub1)) {
    return;
  }
  final Path snapshotDir = new Path("/sub1/.snapshot");
  if (fs.exists(snapshotDir)) {
    for (FileStatus status : fs.listStatus(snapshotDir)) {
      fs.deleteSnapshot(sub1, status.getPath().getName());
    }
    fs.disallowSnapshot(sub1);
  }
  fs.delete(sub1, true);
}
/**
 * Post-test check: the test itself is expected to have removed /meta. If the
 * folder is still present, clean it up and fail the test to flag the leftover.
 */
@After
public void cleanupMetaFolder() throws IOException {
  final Path metaFolder = new Path("/meta");
  final FileSystem clusterFs = cluster.getFileSystem();
  if (clusterFs.exists(metaFolder)) {
    clusterFs.delete(metaFolder, true);
    Assert.fail("Expected meta folder to be deleted");
  }
}