@Override
public void undo(long tid, Master env) throws Exception {
  // Failure rollback: discard everything staged under the import directory.
  env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
}
}
private long removeFile(Path path) { try { if (!useTrash || !fs.moveToTrash(path)) { fs.deleteRecursively(path); } return 1; } catch (FileNotFoundException ex) { // ignored } catch (IOException ex) { log.error("Unable to delete wal {}", path, ex); } return 0; }
@Override
public void undo(long tid, Master master) throws Exception {
  // Roll back by removing the split-directories file written earlier.
  master.getFileSystem().deleteRecursively(new Path(tableInfo.getSplitDirsFile()));
}
@Override
public void undo(long tid, Master master) throws Exception {
  // Remove the default tablet directory created for the new table.
  VolumeManager fs = master.getFileSystem();
  fs.deleteRecursively(new Path(tableInfo.defaultTabletDir));

  // If the table was created pre-split, each per-split directory goes too.
  if (tableInfo.getInitialSplitSize() > 0) {
    SortedSet<Text> splitDirs = Utils
        .getSortedSetFromFile(master.getInputStream(tableInfo.getSplitDirsFile()), false);
    for (Text splitDir : splitDirs) {
      fs.deleteRecursively(new Path(splitDir.toString()));
    }
  }
}
public static void finishReplacement(AccumuloConfiguration acuTableConf, VolumeManager fs, Path location, Set<FileRef> oldDatafiles, String compactName) throws IOException { // start deleting files, if we do not finish they will be cleaned // up later for (FileRef ref : oldDatafiles) { Path path = ref.path(); Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName()); if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile)) fs.deleteRecursively(deleteFile); } }
// NOTE(review): fragment spliced from two sites; braces are unbalanced here.
// First part: when the compacted output already exists, the stale input file is
// deleted (warning if the delete reports false) and the loop moves on. Second
// part: when deleteTmp is set, an old tmp file is removed with the same warning.
if (fs.exists(new Path(expectedCompactedFile))) { if (!fs.deleteRecursively(file.getPath())) log.warn("Delete of file: {} return false", file.getPath()); continue; if (deleteTmp) { log.warn("cleaning up old tmp file: {}", path); if (!fs.deleteRecursively(file.getPath())) log.warn("Delete of tmp file: {} return false", file.getPath());
protected static void cleanupIndexOp(Path tmpDir, VolumeManager fs, ArrayList<FileSKVIterator> readers) throws IOException { // close all of the index sequence files for (FileSKVIterator r : readers) { try { if (r != null) r.close(); } catch (IOException e) { // okay, try to close the rest anyway log.error("{}", e.getMessage(), e); } } if (tmpDir != null) { Volume v = fs.getVolumeByPath(tmpDir); if (v.getFileSystem().exists(tmpDir)) { fs.deleteRecursively(tmpDir); return; } log.error("Did not delete tmp dir because it wasn't a tmp dir {}", tmpDir); } }
@Override
public Repo<Master> call(long tid, Master env) throws Exception {
  // The mappings file is scratch state from the import; discard it.
  env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));

  // Bring the imported table online and release every reservation taken
  // while the import ran.
  env.getTableManager().transitionTableState(tableInfo.tableId, TableState.ONLINE);
  Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false);
  Utils.unreserveTable(env, tableInfo.tableId, tid, true);
  Utils.unreserveHdfsDirectory(env, new Path(tableInfo.exportDir).toString(), tid);

  env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
  LoggerFactory.getLogger(FinishImportTable.class)
      .debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);

  // Final step of this FATE operation - no follow-up Repo.
  return null;
}
// Remove any stale failures file from a previous bulk import attempt, then
// hand off to the cleanup step. NOTE(review): fragment - enclosing method not visible.
fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT)); return new CleanUpBulkImport(tableId, source, bulk, error);
// Trash first, fall back to recursive delete; the branch body (not visible
// here) runs when either disposal succeeded. NOTE(review): fragment.
if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
// Clear any pre-existing content at the destination path.
// NOTE(review): fragment - surrounding context not visible; verify intent at call site.
fs.deleteRecursively(new Path(destPath));
// NOTE(review): fragment with unbalanced braces - two snippets of the same
// error path. A failed delete of the compaction output is only an error if the
// file still exists (a false return for an already-missing file is benign).
if (!fs.deleteRecursively(outputFile.path())) { if (fs.exists(outputFile.path())) { log.error("Unable to delete {}", outputFile); mfw.close(); } finally { if (!fs.deleteRecursively(outputFile.path())) if (fs.exists(outputFile.path())) log.error("Unable to delete {}", outputFile);
// Discard the (possibly partial) output file. NOTE(review): fragment -
// enclosing method and the meaning of outputFileName are not visible here.
getFileSystem().deleteRecursively(new Path(outputFileName));
// Remove the new datafile via the tablet server's volume manager.
// NOTE(review): fragment - the cleanup/rollback context is not visible.
tablet.getTabletServer().getFileSystem().deleteRecursively(newDatafile.path());
// NOTE(review): fragment, braces unbalanced. If the compaction produced zero
// entries the tmp datafile is discarded; otherwise an already-existing target
// file is warned about and removed - presumably before a rename that follows
// outside this view (confirm against the full method).
try { if (dfv.getNumEntries() == 0) { tablet.getTabletServer().getFileSystem().deleteRecursively(tmpDatafile.path()); } else { if (tablet.getTabletServer().getFileSystem().exists(newDatafile.path())) { log.warn("Target map file already exist {}", newDatafile); tablet.getTabletServer().getFileSystem().deleteRecursively(newDatafile.path());
// Delete this table's directory on every configured tables volume.
// NOTE(review): fragment - the loop's closing brace lies outside this view.
VolumeManager fs = master.getFileSystem(); for (String dir : ServerConstants.getTablesDirs(master.getContext())) { fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
// NOTE(review): fragment - on compaction cancellation the partial output file
// is deleted; a failure of that cleanup is logged at warn and not propagated.
log.error("{}", e.getMessage(), e); fs.deleteRecursively(outputFile.path()); } catch (Exception e) { log.warn("Failed to delete Canceled compaction output file {}", outputFile, e);
@Override
public void undo(long tid, Master env) throws Exception {
  // On failure, remove the whole import staging directory.
  Path importDir = new Path(tableInfo.importDir);
  env.getFileSystem().deleteRecursively(importDir);
}
}
@Override
public void undo(long tid, Master master) throws Exception {
  // Roll back by removing the directory created for this table.
  master.getFileSystem().deleteRecursively(new Path(tableInfo.dir));
}
}
public static void finishReplacement(AccumuloConfiguration acuTableConf, VolumeManager fs, Path location, Set<FileRef> oldDatafiles, String compactName) throws IOException { // start deleting files, if we do not finish they will be cleaned // up later for (FileRef ref : oldDatafiles) { Path path = ref.path(); Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName()); if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile)) fs.deleteRecursively(deleteFile); } }