Path oldPath = fs.getFullPath(FileType.TABLE, "/" + MetadataTable.ID + "/root_tablet"); if (fs.exists(oldPath)) { VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(RootTable.ID, context); String newPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context)) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID; fs.mkdirs(new Path(newPath)); if (!fs.rename(oldPath, new Path(newPath))) { throw new IOException("Failed to move root tablet from " + oldPath + " to " + newPath); if (fs.exists(path)) { if (location != null) { throw new IllegalStateException(
String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris(context)) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR; lowDirectory = Constants.DEFAULT_TABLET_LOCATION; Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory); if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) { FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem(); return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory()) .toString(); lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName(); Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory); if (fs.exists(lowDirectoryPath)) throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath); if (fs.mkdirs(lowDirectoryPath)) { FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem(); return lowDirectoryPath .makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory())
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade {}", volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
private long removeFile(Path path) { try { if (!useTrash || !fs.moveToTrash(path)) { fs.deleteRecursively(path); } return 1; } catch (FileNotFoundException ex) { // ignored } catch (IOException ex) { log.error("Unable to delete wal {}", path, ex); } return 0; }
private static void initDirs(VolumeManager fs, UUID uuid, String[] baseDirs, boolean print) throws IOException { for (String baseDir : baseDirs) { fs.mkdirs(new Path(new Path(baseDir, ServerConstants.VERSION_DIR), "" + ServerConstants.DATA_VERSION), new FsPermission("700")); // create an instance id Path iidLocation = new Path(baseDir, ServerConstants.INSTANCE_ID_DIR); fs.mkdirs(iidLocation); fs.createNewFile(new Path(iidLocation, uuid.toString())); if (print) log.info("Initialized volume {}", baseDir); } }
private static void createDirectories(VolumeManager fs, String... dirs) throws IOException { for (String s : dirs) { Path dir = new Path(s); try { FileStatus fstat = fs.getFileStatus(dir); if (!fstat.isDirectory()) { log.error("FATAL: location {} exists but is not a directory", dir); return; } } catch (FileNotFoundException fnfe) { // attempt to create directory, since it doesn't exist if (!fs.mkdirs(dir)) { log.error("FATAL: unable to create directory {}", dir); return; } } } }
fs.deleteRecursively(new Path(destPath)); try (final FSDataInputStream fsinput = fs.open(srcPath)) { DFSLoggerInputStreams inputStreams; try { fs.mkdirs(new Path(destPath)); writeBuffer(destPath, Collections.emptyList(), part++); fs.create(SortedLogState.getFinishedMarkerPath(destPath)).close(); return; fs.create(new Path(destPath, "finished")).close(); log.info("Finished log sort {} {} bytes {} parts in {}ms", name, getBytesCopied(), part, getSortTime()); try { fs.mkdirs(new Path(destPath)); fs.create(SortedLogState.getFailedMarkerPath(destPath)).close(); } catch (IOException e) { log.error("Error creating failed flag file " + name, e);
VolumeManager fs = master.getFileSystem(); List<FileStatus> files = new ArrayList<>(); for (FileStatus entry : fs.listStatus(new Path(bulk))) { files.add(entry); if (!fs.createNewFile(writable)) { fs.delete(writable); if (!fs.createNewFile(writable)) throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, "Unable to write to " + this.errorDir); fs.delete(writable); FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true); try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8))) { for (String f : filesToLoad) {
/**
 * Copies one file between volumes as described by the work unit: the payload is a UTF-8
 * "sourcePath,destinationPath" pair. The file is copied to a {@code .tmp} name on the
 * destination volume first and then renamed into place, so a partially written file is never
 * visible at the final path. On any I/O failure an empty file is created at the destination as
 * a failure marker.
 *
 * @param workID identifier of the work unit (unused here)
 * @param data UTF-8 bytes of "sourcePath,destinationPath"
 */
@Override
public void process(String workID, byte[] data) {
  String[] paths = new String(data, UTF_8).split(",");
  Path orig = new Path(paths[0]);
  Path dest = new Path(paths[1]);
  Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
  VolumeManager vm = context.getVolumeManager();
  try {
    FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
    FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
    FileUtil.copy(origFs, orig, destFs, tmp, false, true, context.getHadoopConf());
    // Hadoop FileSystem.rename reports failure via its boolean return, not an exception.
    // Previously the result was ignored, so a failed move was silently treated as success;
    // throwing here routes the failure to the marker-file handling below.
    if (!destFs.rename(tmp, dest)) {
      throw new IOException("rename of " + tmp + " to " + dest + " returned false");
    }
    log.debug("copied {} to {}", orig, dest);
  } catch (IOException ex) {
    try {
      // Mark this copy as failed by leaving an empty file at the destination.
      FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
      destFs.create(dest).close();
      log.warn(" marked " + dest + " failed", ex);
    } catch (IOException e) {
      log.error("Unable to create failure flag file " + dest, e);
    }
  }
}
public static void exportTable(VolumeManager fs, ServerContext context, String tableName, Table.ID tableID, String exportDir) throws Exception { fs.mkdirs(new Path(exportDir)); Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem() .makeQualified(new Path(exportDir, Constants.EXPORT_FILE)); FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false); ZipOutputStream zipOut = new ZipOutputStream(fileOut); BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
VolumeManager fs = master.getFileSystem(); if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT))) return new CleanUpBulkImport(tableId, source, bulk, error); new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) { String line = null; while ((line = in.readLine()) != null) { Path path = new Path(line); if (!fs.exists(new Path(error, path.getName()))) failures.put(new FileRef(line, path), line); Path orig = new Path(failure); Path dest = new Path(error, orig.getName()); fs.rename(orig, dest); log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed"); Path dest = new Path(error, orig.getName()); if (fs.exists(dest)) continue; fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT)); return new CleanUpBulkImport(tableId, source, bulk, error);
try { FileOperations fileFactory = FileOperations.getInstance(); FileSystem ns = this.fs.getVolumeByPath(outputFilePath).getFileSystem(); mfw = fileFactory.newWriterBuilder() .forFile(outputFilePathName, ns, ns.getConf(), context.getCryptoService()) if (!fs.deleteRecursively(outputFile.path())) { if (fs.exists(outputFile.path())) { log.error("Unable to delete {}", outputFile); mfw.close(); } finally { if (!fs.deleteRecursively(outputFile.path())) if (fs.exists(outputFile.path())) log.error("Unable to delete {}", outputFile);
String expectedCompactedFile = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename.split("\\+")[1]; if (fs.exists(new Path(expectedCompactedFile))) { if (!fs.deleteRecursively(file.getPath())) log.warn("Delete of file: {} return false", file.getPath()); continue; if (deleteTmp) { log.warn("cleaning up old tmp file: {}", path); if (!fs.deleteRecursively(file.getPath())) log.warn("Delete of tmp file: {} return false", file.getPath());
VolumeManager fs = environment.getFileSystem(); fs.mkdirs(new Path(tableInfo.importDir)); FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir)); mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
fullPath = fs.getFullPath(FileType.TABLE, switchedDelete); } else { fullPath = fs.getFullPath(FileType.TABLE, delete); if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) { ++status.current.deleted; } else if (fs.exists(fullPath)) {
if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) { throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir)); fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
private static Path createTmpDir(AccumuloConfiguration acuConf, VolumeManager fs) throws IOException { String accumuloDir = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris()); Path result = null; while (result == null) { result = new Path(accumuloDir + Path.SEPARATOR + "tmp/idxReduce_" + String.format("%09d", new Random().nextInt(Integer.MAX_VALUE))); try { fs.getFileStatus(result); result = null; continue; } catch (FileNotFoundException fne) { // found an unused temp directory } fs.mkdirs(result); // try to reserve the tmp dir // In some versions of hadoop, two clients concurrently trying to create the same directory // might both return true // Creating a file is not subject to this, so create a special file to make sure we solely // will use this directory if (!fs.createNewFile(new Path(result, "__reserve"))) result = null; } return result; }
private static Path createTmpDir(ServerContext context, String tabletDirectory) throws IOException { VolumeManager fs = context.getVolumeManager(); Path result = null; while (result == null) { result = new Path(tabletDirectory + Path.SEPARATOR + "tmp/idxReduce_" + String.format("%09d", new SecureRandom().nextInt(Integer.MAX_VALUE))); try { fs.getFileStatus(result); result = null; continue; } catch (FileNotFoundException fne) { // found an unused temp directory } fs.mkdirs(result); // try to reserve the tmp dir // In some versions of hadoop, two clients concurrently trying to create the same directory // might both return true // Creating a file is not subject to this, so create a special file to make sure we solely // will use this directory if (!fs.createNewFile(new Path(result, "__reserve"))) result = null; } return result; }
FileStatus[] mapFiles = fs.listStatus(dirPath); .getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME)); if (dataStatus.isDirectory()) { log.warn("{} is not a map file, ignoring", fileStatus.getPath()); Path newPath = new Path(bulkDir, newName); try { fs.rename(fileStatus.getPath(), newPath); log.debug("Moved {} to {}", fileStatus.getPath(), newPath); } catch (IOException E1) {
/**
 * Determines a created-time for the given file and records it as a replication Status entry in
 * the metadata table's replication section, then returns the timestamp used.
 *
 * <p>If the file exists, its filesystem modification time is used; otherwise the current wall
 * clock time is used. The Status is written under row {@code ReplicationSection row prefix +
 * file} with column family {@code ReplicationSection.COLF} and the table id as the qualifier,
 * and the writer is flushed before returning.
 *
 * @param file file whose created-time is being recorded
 * @param tableId table id used as the column qualifier of the Status entry
 * @return the created-time that was persisted (millis since epoch)
 * @throws IOException if the existence/status check on the file fails
 * @throws MutationsRejectedException if writing or flushing the mutation fails
 */
private long setAndGetCreatedTime(Path file, String tableId)
    throws IOException, MutationsRejectedException {
  long createdTime;
  if (fs.exists(file)) {
    // Prefer the file's actual modification time when it is still present.
    createdTime = fs.getFileStatus(file).getModificationTime();
  } else {
    createdTime = System.currentTimeMillis();
  }
  Status status = Status.newBuilder().setCreatedTime(createdTime).build();
  Mutation m = new Mutation(new Text(ReplicationSection.getRowPrefix() + file));
  m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId), ProtobufUtil.toValue(status));
  replicationWriter.addMutation(m);
  // Flush so the created-time is durable before callers proceed.
  replicationWriter.flush();
  return createdTime;
}
}