/** Recursively removes the per-test base folder from the filesystem after each test. */
@After
public void shutDown() throws IOException {
  final Path base = new Path(baseFolder.toString());
  fs.delete(base, true);
}
// Recursively remove idPath (presumably an instance-id marker file — confirm against
// the enclosing method); the boolean success result is intentionally ignored here.
fs.delete(idPath, true);
/**
 * Cleans up the /sub1 test directory after each test: removes every snapshot
 * listed under /sub1/.snapshot, disallows further snapshots, then deletes the
 * directory itself. Snapshots must be removed before the directory can be deleted.
 */
@After
public void tearDown() throws IOException {
  final Path sub1 = new Path("/sub1");
  if (!fs.exists(sub1)) {
    return;
  }
  final Path snapshotDir = new Path("/sub1/.snapshot");
  if (fs.exists(snapshotDir)) {
    for (FileStatus status : fs.listStatus(snapshotDir)) {
      fs.deleteSnapshot(sub1, status.getPath().getName());
    }
    fs.disallowSnapshot(sub1);
  }
  fs.delete(sub1, true);
}
/** Deletes every entry directly under the filesystem root after each test. */
@After
public void tearDown() throws Exception {
  for (FileStatus entry : FS.listStatus(new Path("/"))) {
    FS.delete(entry.getPath(), true);
  }
}
@After public void tearDown() throws Exception { try { wals.shutdown(); } catch (IOException exception) { // one of our tests splits out from under our wals. LOG.warn("Ignoring failure to close wal factory. " + exception.getMessage()); LOG.debug("details of failure to close wal factory.", exception); } TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); }
/** Tears down the mini HDFS cluster, first deleting the test base path. */
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster == null) {
    return;
  }
  final org.apache.hadoop.fs.Path base = new org.apache.hadoop.fs.Path(basePath.toUri());
  hdfsCluster.getFileSystem().delete(base, true);
  hdfsCluster.shutdown();
}
/** Class-level cleanup: wipe the test base path, then stop the mini HDFS cluster. */
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster != null) {
    hdfsCluster
        .getFileSystem()
        .delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    hdfsCluster.shutdown();
  }
}
/**
 * Stops the procedure store, deletes its WAL directory, and shuts down the
 * mini-cluster, logging (rather than failing on) shutdown errors.
 * Not annotated with {@code @After} — presumably invoked explicitly by the
 * tests, hence the JUnit4TearDownNotRun suppression; confirm against callers.
 */
@SuppressWarnings("JUnit4TearDownNotRun")
public void tearDown() throws Exception {
  store.stop(false);
  UTIL.getDFSCluster().getFileSystem().delete(store.getWALDir(), true);
  try {
    UTIL.shutdownMiniCluster();
  } catch (Exception cause) {
    LOG.warn("failure shutting down cluster", cause);
  }
}
/** Closes the WAL factory, then removes the HBase root directory recursively. */
@After
public void tearDown() throws Exception {
  wals.close();
  TEST_UTIL.getDFSCluster().getFileSystem().delete(hbaseRootDir, true);
}
/**
 * Prepares per-test state: a fresh configuration, the cluster filesystem, a
 * clean HBase root directory, a uniquely-named per-test WAL directory, and a
 * new WAL factory. Any pre-existing root dir is deleted so each test starts
 * from an empty filesystem.
 */
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(this.conf);
  this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // Unique per-test server name (method name + timestamp) keeps WAL dirs from
  // colliding across tests.
  String serverName =
      ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis())
          .toString();
  this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
  this.logDir = new Path(this.hbaseRootDir, logName);
  // Reuse the FileSystem handle cached above instead of re-resolving it twice.
  if (this.fs.exists(this.hbaseRootDir)) {
    this.fs.delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, currentTest.getMethodName());
}
@Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); // this.cluster = TEST_UTIL.getDFSCluster(); this.fs = TEST_UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = FSUtils.getRootDir(conf); this.hbaseWALRootDir = FSUtils.getWALRootDir(conf); this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME); String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010, System.currentTimeMillis()).toString(); this.logDir = new Path(this.hbaseWALRootDir, AbstractFSWALProvider.getWALDirectoryName(serverName)); if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); } if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); } this.wals = new WALFactory(conf, serverName); }
@Override public void close() { keyManager.close(); // close the output file IOUtils.closeStream(out); if (fs != null) { try { if (checkOtherInstanceRunning) { fs.delete(idPath, true); } } catch(IOException ioe) { LOG.warn("Failed to delete " + idPath, ioe); } } }
/**
 * Get rid of Path f, whether a true file or dir.
 *
 * @param f the path to remove; directories are deleted recursively
 * @return the result of the underlying {@code delete(f, true)} call
 * @throws IOException if the underlying filesystem operation fails
 * @deprecated use {@code delete(Path, boolean)} with an explicit recursive flag instead
 */ @Deprecated public boolean delete(Path f) throws IOException { return delete(f, true); }
/**
 * Mutates the tree under {@code dir} to produce a specific diff for the test:
 * deletes dir/d1/d2/f1 (non-recursively), renames dir/d1/d2 to dir/d1/d21,
 * then renames dir/d1 to dir/d11. The exact operation order is the scenario
 * under test — do not reorder.
 */
private void changeData4(Path dir) throws Exception { final Path d1 = new Path(dir, "d1"); final Path d11 = new Path(dir, "d11"); final Path d2 = new Path(d1, "d2"); final Path d21 = new Path(d1, "d21"); final Path f1 = new Path(d2, "f1"); dfs.delete(f1, false); dfs.rename(d2, d21); dfs.rename(d1, d11); }
/** Post-test cleanup: recursively removes each child of the filesystem root. */
@After
public void tearDown() throws Exception {
  final Path root = new Path("/");
  FileStatus[] children = FS.listStatus(root);
  for (FileStatus child : children) {
    FS.delete(child.getPath(), true);
  }
}
@After public void tearDown() throws Exception { try { wals.shutdown(); } catch (IOException exception) { // one of our tests splits out from under our wals. LOG.warn("Ignoring failure to close wal factory. " + exception.getMessage()); LOG.debug("details of failure to close wal factory.", exception); } TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); }
// NOTE(review): this looks intentional — the test expects /meta to have been removed
// by the code under test; if it is still present we clean it up and then fail the
// test to surface the leftover state. Confirm against the tests in this class.
@After public void cleanupMetaFolder() throws IOException { Path meta = new Path("/meta"); if (cluster.getFileSystem().exists(meta)) { cluster.getFileSystem().delete(meta, true); Assert.fail("Expected meta folder to be deleted"); } }
/**
 * Stops the store, removes its WAL directory, then shuts down the mini-cluster;
 * cluster-shutdown failures are logged, not rethrown. Presumably called
 * explicitly rather than via JUnit's lifecycle (no {@code @After}), which is
 * why JUnit4TearDownNotRun is suppressed — confirm against callers.
 */
@SuppressWarnings("JUnit4TearDownNotRun")
public void tearDown() throws Exception {
  store.stop(false);
  UTIL.getDFSCluster().getFileSystem().delete(store.getWALDir(), true);
  try {
    UTIL.shutdownMiniCluster();
  } catch (Exception shutdownError) {
    LOG.warn("failure shutting down cluster", shutdownError);
  }
}
/** Per-test cleanup: close the WAL factory and wipe the HBase root directory. */
@After
public void tearDown() throws Exception {
  this.wals.close();
  TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
}