/**
 * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
 * hlogs.
 * @param backupInfo backup context; its HLog target directory's parent is scanned for leftovers
 * @param conf configuration (not read here; retained for interface compatibility)
 * @throws IOException exception
 */
protected void cleanupDistCpLog(BackupInfo backupInfo, Configuration conf) throws IOException {
  Path rootPath = new Path(backupInfo.getHLogTargetDir()).getParent();
  FileStatus[] files = FSUtils.listStatus(fs, rootPath);
  if (files == null) {
    // listStatus returns null when the directory is absent or empty; nothing to clean.
    return;
  }
  for (FileStatus file : files) {
    if (file.getPath().getName().startsWith("_distcp_logs")) {
      LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
      // Warn instead of silently ignoring a failed delete, consistent with
      // cleanupExportSnapshotLog.
      if (!FSUtils.delete(fs, file.getPath(), true)) {
        LOG.warn("Can not delete " + file.getPath());
      }
    }
  }
}
/**
 * Deletes files matching the table info file pattern within the given directory
 * whose sequenceId is at most the given max sequenceId.
 * @param fs filesystem holding the table descriptor files
 * @param dir directory to scan with {@code TABLEINFO_PATHFILTER}
 * @param maxSequenceId files with a sequence id {@code <=} this value are deleted
 * @throws IOException if listing the directory fails
 */
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
    throws IOException {
  FileStatus[] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
  if (status == null) {
    // listStatus returns null when the directory does not exist or nothing matches the
    // filter; without this guard the for-each below would throw an NPE.
    return;
  }
  for (FileStatus file : status) {
    Path path = file.getPath();
    int sequenceId = getTableInfoSequenceId(path);
    if (sequenceId <= maxSequenceId) {
      // Non-recursive delete: table info entries are plain files.
      boolean success = FSUtils.delete(fs, path, false);
      if (success) {
        LOG.debug("Deleted " + path);
      } else {
        LOG.error("Failed to delete table descriptor at " + path);
      }
    }
  }
}
/**
 * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
 * snapshots.
 * @param conf configuration; {@code BackupRestoreConstants.CONF_STAGING_ROOT} (defaulting to
 *   the filesystem working directory) locates the staging root to scan
 * @throws IOException exception
 */
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  Path stagingDir = new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT,
      fs.getWorkingDirectory().toString()));
  FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
  if (files == null) {
    // Staging directory absent or empty; nothing to clean.
    return;
  }
  for (FileStatus file : files) {
    if (file.getPath().getName().startsWith("exportSnapshot-")) {
      LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
      // Best-effort: deletion failure is logged, not fatal.
      if (!FSUtils.delete(fs, file.getPath(), true)) {
        LOG.warn("Can not delete " + file.getPath());
      }
    }
  }
}
/**
 * Releases the exclusive hbck lock: closes the lock file handle and deletes the lock file,
 * retrying the delete on IOException until the retry counter is exhausted.
 * The compareAndSet ensures the cleanup runs at most once even if called concurrently.
 */
private void unlockHbck() {
  if (isExclusive() && hbckLockCleanup.compareAndSet(true, false)) {
    RetryCounter retryCounter = lockFileRetryCounterFactory.create();
    do {
      try {
        // Close the stream before deleting; closeQuietly swallows close errors.
        IOUtils.closeQuietly(hbckOutFd);
        FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);
        LOG.info("Finishing hbck");
        return;
      } catch (IOException ioe) {
        LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
            + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
        LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
        try {
          // Back off before the next delete attempt.
          retryCounter.sleepUntilNextRetry();
        } catch (InterruptedException ie) {
          // Restore the interrupt flag and give up; the lock file may be left behind.
          Thread.currentThread().interrupt();
          LOG.warn("Interrupted while deleting lock file" + HBCK_LOCK_PATH);
          return;
        }
      }
    } while (retryCounter.shouldRetry());
  }
}
FSUtils.delete(fs, tmpPath, true);
@After public void tearDown() throws Exception { try { FileSystem fs = UTIL.getTestFileSystem(); // cleanup each of the files/directories registered for (Path file : toCleanup) { // remove the table and archive directories FSUtils.delete(fs, file, true); } } catch (IOException e) { LOG.warn("Failure to delete archive directory", e); } finally { toCleanup.clear(); } // make sure that backups are off for all tables archivingClient.disableHFileBackup(); }
@Test public void testDeleteAndExists() throws Exception { final Path rootdir = htu.getDataTestDir(); final FileSystem fs = rootdir.getFileSystem(conf); conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // then that the correct file is created String file = htu.getRandomUUID().toString(); Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file); Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file); try { FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null); out.close(); assertTrue("The created file should be present", FSUtils.isExists(fs, p)); // delete the file with recursion as false. Only the file will be deleted. FSUtils.delete(fs, p, false); // Create another file FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null); out1.close(); // delete the file with recursion as false. Still the file only will be deleted FSUtils.delete(fs, p1, true); assertFalse("The created file should be present", FSUtils.isExists(fs, p1)); // and then cleanup } finally { FSUtils.delete(fs, p, true); FSUtils.delete(fs, p1, true); } }
FSUtils.delete(fs, rootdir, true);
/**
 * Removes .tableinfo files that are laid in pre-96 format (i.e., the tableinfo files are under
 * table directory).
 * @param tableName table whose directory is scanned for legacy tableinfo files
 * @throws IOException if listing or deleting fails
 */
private void removeTableInfoInPre96Format(TableName tableName) throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  FileStatus[] tableInfoFiles = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER);
  if (tableInfoFiles == null) {
    // Directory missing or no matching files: nothing to remove.
    return;
  }
  for (FileStatus tableInfo : tableInfoFiles) {
    // Non-recursive: tableinfo entries are plain files.
    FSUtils.delete(fs, tableInfo.getPath(), false);
  }
}
FSUtils.delete(fs, tmpPath, true);
/**
 * Deletes files matching the table info file pattern within the given directory
 * whose sequenceId is at most the given max sequenceId.
 * @param fs filesystem holding the table descriptor files
 * @param dir directory to scan with {@code TABLEINFO_PATHFILTER}
 * @param maxSequenceId files with a sequence id {@code <=} this value are deleted
 * @throws IOException if listing the directory fails
 */
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
    throws IOException {
  FileStatus[] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
  if (status == null) {
    // listStatus returns null when the directory does not exist or nothing matches the
    // filter; without this guard the for-each below would throw an NPE.
    return;
  }
  for (FileStatus file : status) {
    Path path = file.getPath();
    int sequenceId = getTableInfoSequenceId(path);
    if (sequenceId <= maxSequenceId) {
      // Non-recursive delete: table info entries are plain files.
      boolean success = FSUtils.delete(fs, path, false);
      if (success) {
        LOG.debug("Deleted table descriptor at " + path);
      } else {
        LOG.error("Failed to delete descriptor at " + path);
      }
    }
  }
}
FSUtils.delete(fs, tmpPath, true);
/**
 * Post-test cleanup: removes every registered file/directory, always clears the
 * registration list, then disables HFile backup for all tables.
 */
@After
public void tearDown() throws Exception {
  try {
    FileSystem fs = UTIL.getTestFileSystem();
    // cleanup each of the files/directories registered
    for (Path file : toCleanup) {
      // remove the table and archive directories
      FSUtils.delete(fs, file, true);
    }
  } catch (IOException e) {
    LOG.warn("Failure to delete archive directory", e);
  } finally {
    // Clear the registry even when deletion failed, so later tests start clean.
    toCleanup.clear();
  }
  // make sure that backups are off for all tables
  archivingClient.disableHFileBackup();
}
/**
 * Verifies FSUtils.delete removes a plain file both with recursion disabled and enabled,
 * and that FSUtils.isExists reflects the file's presence.
 */
@Test
public void testDeleteAndExists() throws Exception {
  final Path rootdir = htu.getDataTestDir();
  final FileSystem fs = rootdir.getFileSystem(conf);
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // then that the correct file is created
  String file = htu.getRandomUUID().toString();
  Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
  Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // delete the file with recursion as false. Only the file will be deleted.
    FSUtils.delete(fs, p, false);
    // Create another file
    FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
    out1.close();
    // delete with recursion as true; a plain file is removed the same way.
    FSUtils.delete(fs, p1, true);
    // NOTE(review): the message text says "should be present" but this asserts
    // absence after delete — the message is misleading and should read "should
    // not be present".
    assertFalse("The created file should be present", FSUtils.isExists(fs, p1));
    // and then cleanup
  } finally {
    FSUtils.delete(fs, p, true);
    FSUtils.delete(fs, p1, true);
  }
}
/**
 * Releases the exclusive hbck lock: closes the lock file handle and deletes the lock file,
 * retrying the delete on IOException until the retry counter is exhausted. The
 * compareAndSet guarantees cleanup runs at most once.
 */
private void unlockHbck() {
  if (!isExclusive() || !hbckLockCleanup.compareAndSet(true, false)) {
    return;
  }
  RetryCounter retries = lockFileRetryCounterFactory.create();
  for (;;) {
    try {
      // Close the handle first; closeQuietly swallows close failures.
      IOUtils.closeQuietly(hbckOutFd);
      FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);
      LOG.info("Finishing hbck");
      return;
    } catch (IOException ioe) {
      LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
          + (retries.getAttemptTimes() + 1) + " of " + retries.getMaxAttempts());
      LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
      try {
        // Back off before the next attempt.
        retries.sleepUntilNextRetry();
      } catch (InterruptedException ie) {
        // Restore the interrupt flag and abandon cleanup.
        Thread.currentThread().interrupt();
        LOG.warn("Interrupted while deleting lock file" + HBCK_LOCK_PATH);
        return;
      }
    }
    if (!retries.shouldRetry()) {
      return;
    }
  }
}
FSUtils.delete(fs, rootdir, true);