/**
 * Creates a lock handle backed by {@code lockFile}.
 *
 * @param fs       file system on which the lock file lives
 * @param lockFile path to the lock file; must be a plain file, not a directory
 * @throws IOException if the directory check fails
 * @throws IllegalArgumentException if {@code lockFile} refers to a directory
 */
private DirLock(FileSystem fs, Path lockFile) throws IOException {
    // Bug fix: the original message said "is not a directory", which is the
    // opposite of what this guard checks — we reject paths that ARE directories.
    if (fs.isDirectory(lockFile)) {
        throw new IllegalArgumentException(lockFile.toString() + " is a directory, expected a file");
    }
    this.fs = fs;
    this.lockFile = lockFile;
}
// PathFilter callback: accept only paths that are directories on this FileSystem.
// The checked IOException from the file-system probe is rethrown unchecked because
// the accept(Path) signature does not permit checked exceptions.
@Override public boolean accept(Path p) { try { return fs.isDirectory(p); } catch (IOException e) { throw new RuntimeException(e); } } };
// PathFilter callback (duplicate of an identical filter elsewhere in this file):
// accept only paths that are directories; wrap the checked IOException because
// accept(Path) cannot declare it.
@Override public boolean accept(Path p) { try { return fs.isDirectory(p); } catch (IOException e) { throw new RuntimeException(e); } } };
/**
 * Ensures {@code dir} exists and is a directory, creating it when absent.
 * Logs and throws a RuntimeException if the path is a regular file, if the
 * directory cannot be created, or if the file-system check itself fails.
 *
 * @param fs             file system to operate on
 * @param dir            directory path to validate or create
 * @param dirDescription human-readable label used in log/error messages
 */
private static void validateOrMakeDir(FileSystem fs, Path dir, String dirDescription) {
    final String createFailureMsg = "Unable to create " + dirDescription + " directory " + dir;
    try {
        if (!fs.exists(dir)) {
            // Absent: attempt creation; mkdirs returning false means it failed.
            if (!fs.mkdirs(dir)) {
                LOG.error(createFailureMsg);
                throw new RuntimeException(createFailureMsg);
            }
        } else if (!fs.isDirectory(dir)) {
            // Present but a regular file — cannot be used as a directory.
            String notADirMsg = dirDescription + " directory is a file, not a dir. " + dir;
            LOG.error(notADirMsg);
            throw new RuntimeException(notADirMsg);
        }
    } catch (IOException e) {
        LOG.error(createFailureMsg, e);
        throw new RuntimeException(createFailureMsg, e);
    }
}
/**
 * Reports whether {@code path} refers to a directory on the HDFS instance
 * obtained from {@link #getFs()}.
 */
@Override
public boolean isDirectory(String path) throws IOException {
    return getFs().isDirectory(new Path(path));
}
// PathFilter callback: a path is accepted only if it is a directory AND, when a
// snapshot path pattern is configured, its string form matches that pattern.
// Non-directories are rejected outright; an absent pattern accepts every directory.
// The checked IOException is converted to unchecked via Guava's Throwables.propagate
// because accept(Path) cannot declare it.
@Override public boolean accept(Path p) { try { if (!HiveSnapshotRegistrationPolicy.this.fs.isDirectory(p)) { return false; } } catch (IOException e) { throw Throwables.propagate(e); } return !HiveSnapshotRegistrationPolicy.this.snapshotPathPattern.isPresent() || HiveSnapshotRegistrationPolicy.this.snapshotPathPattern.get().matcher(p.toString()).matches(); } });
/**
 * Resolves a directory flag: trusts the precomputed {@code isDir} hint when it
 * is non-null, otherwise falls back to querying the file system.
 *
 * @param fs    file system used for the fallback check
 * @param isDir optional precomputed answer; may be null when unknown
 * @param p     path to test
 * @return true if {@code p} is a directory
 * @throws IOException if the file-system query fails
 */
protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) throws IOException {
    if (isDir != null) {
        return isDir;
    }
    return fs.isDirectory(p);
}
}
/**
 * Recursively determines whether {@code path} contains any regular file.
 * A non-directory path counts as content; a directory has content iff at
 * least one of its descendants does. An empty directory tree yields false.
 *
 * @param fs   file system to inspect
 * @param path root of the subtree to check
 * @return true if the subtree contains at least one non-directory entry
 * @throws IOException if listing fails
 */
public static boolean hasContent(FileSystem fs, Path path) throws IOException {
    if (!fs.isDirectory(path)) {
        // A plain file is itself content.
        return true;
    }
    for (FileStatus child : fs.listStatus(path)) {
        if (hasContent(fs, child.getPath())) {
            return true;  // short-circuit on the first file found
        }
    }
    return false;
}
/**
 * Delegates the directory check to the underlying file system after mapping
 * this wrapper's scheme back to the underlying scheme.
 */
public boolean isDirectory(Path f) throws java.io.IOException {
    Path underlyingPath = replaceScheme(f, this.replacementScheme, this.underlyingScheme);
    return this.underlyingFs.isDirectory(underlyingPath);
}
/**
 * Lists immediate child directories of {@code metadataDirectory} that hold a
 * Presto schema file. Hidden children (name starting with '.') are ignored,
 * and a missing/non-directory metadata root yields an empty list.
 *
 * @param metadataDirectory directory to scan for schema subdirectories
 * @return immutable list of qualifying child paths
 */
private List<Path> getChildSchemaDirectories(Path metadataDirectory) {
    try {
        if (!metadataFileSystem.isDirectory(metadataDirectory)) {
            return ImmutableList.of();
        }
        ImmutableList.Builder<Path> schemaDirs = ImmutableList.builder();
        for (FileStatus child : metadataFileSystem.listStatus(metadataDirectory)) {
            if (child.isDirectory()) {
                Path childPath = child.getPath();
                boolean hidden = childPath.getName().startsWith(".");
                // Only directories that actually contain a schema file qualify.
                if (!hidden && metadataFileSystem.isFile(new Path(childPath, PRESTO_SCHEMA_FILE_NAME))) {
                    schemaDirs.add(childPath);
                }
            }
        }
        return schemaDirs.build();
    } catch (IOException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
/**
 * Checks whether {@code path} is a directory, resolving the file system
 * through the HDFS environment for the given context.
 *
 * @throws PrestoException wrapping any IOException from the check
 */
private static boolean isDirectory(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path) {
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
        return fileSystem.isDirectory(path);
    } catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
    }
}
/**
 * Validates that {@code location} names an existing directory and returns it
 * as a Path. Both an unparseable URI and a file-system failure are reported
 * as an invalid table property.
 *
 * @param context  HDFS context used to resolve the file system
 * @param location external location string supplied by the user
 * @return the validated directory path
 */
private Path getExternalPath(HdfsContext context, String location) {
    try {
        Path externalPath = new Path(location);
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, externalPath);
        if (!fileSystem.isDirectory(externalPath)) {
            throw new PrestoException(INVALID_TABLE_PROPERTY, "External location must be a directory");
        }
        return externalPath;
    } catch (IllegalArgumentException | IOException e) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, "External location is not a valid file system URI", e);
    }
}
/**
 * Returns the most recently modified file at the location of interest,
 * retrying transient failures.
 *
 * @param uri     either a directory or a file on HDFS; for a file, its parent
 *                directory is searched instead
 * @param pattern matcher for file names in the target directory; {@code null}
 *                matches any file
 * @return the URI of the file with the latest modification timestamp, or
 *         {@code null} when the path does not exist
 */
@Override
public URI getLatestVersion(final URI uri, final @Nullable Pattern pattern) {
    final Path path = new Path(uri);
    try {
        return RetryUtils.retry(
            () -> {
                final FileSystem fs = path.getFileSystem(config);
                if (!fs.exists(path)) {
                    return null;
                }
                // A file argument means "search its parent directory".
                final Path searchDir = fs.isDirectory(path) ? path : path.getParent();
                return mostRecentInDir(searchDir, pattern);
            },
            shouldRetryPredicate(),
            DEFAULT_RETRY_COUNT
        );
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
public static String getFirstLevelAcidDirPath(Path dataPath, FileSystem fileSystem) throws IOException { if (dataPath == null) { return null; } String firstLevelAcidDir = getAcidSubDir(dataPath); if (firstLevelAcidDir != null) { return firstLevelAcidDir; } String acidDirPath = getFirstLevelAcidDirPath(dataPath.getParent(), fileSystem); if (acidDirPath == null) { return null; } // We need the path for directory so no need to append file name if (fileSystem.isDirectory(dataPath)) { return acidDirPath + Path.SEPARATOR + dataPath.getName(); } return acidDirPath; }
/**
 * Lists resource names under {@code folderPath}, either recursively or only
 * the immediate children. Returns null when the folder does not exist, is not
 * a directory, or contains nothing.
 *
 * @param folderPath logical folder path; a trailing slash is appended if absent
 * @param recursive  whether to descend into subfolders
 * @return the matching resource paths, or null when there are none
 * @throws IOException if the file-system access fails
 */
private NavigableSet<String> listResourcesImpl(String folderPath, boolean recursive) throws IOException {
    Path hdfsPath = getRealHDFSPath(folderPath);
    if (!fs.exists(hdfsPath) || !fs.isDirectory(hdfsPath)) {
        return null;
    }
    String prefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
    TreeSet<String> resourcePaths = recursive
            ? getAllFilePath(hdfsPath, prefix)
            : getFilePath(hdfsPath, prefix);
    return resourcePaths.isEmpty() ? null : resourcePaths;
}
/**
 * Replaces the stored privileges of a principal on a table by writing a
 * permissions file into the table's permissions directory, creating that
 * directory first if needed.
 *
 * @param principalName name of the grantee principal
 * @param principalType kind of principal (user or role)
 * @param databaseName  database of the target table
 * @param tableName     target table name
 * @param privileges    privileges to persist for the principal
 */
private synchronized void setTablePrivileges(
        String principalName,
        PrincipalType principalType,
        String databaseName,
        String tableName,
        Collection<HivePrivilegeInfo> privileges) {
    requireNonNull(principalName, "principalName is null");
    requireNonNull(principalType, "principalType is null");
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(privileges, "privileges is null");
    try {
        Table table = getRequiredTable(databaseName, tableName);
        Path permissionsDirectory = getPermissionsDirectory(table);
        // mkdirs is attempted unconditionally; the directory check below
        // covers both a failed creation and a pre-existing non-directory.
        metadataFileSystem.mkdirs(permissionsDirectory);
        if (!metadataFileSystem.isDirectory(permissionsDirectory)) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not create permissions directory");
        }
        Path permissionFilePath = getPermissionsPath(permissionsDirectory, principalName, principalType);
        List<PermissionMetadata> permissionEntries = privileges.stream()
                .map(PermissionMetadata::new)
                .collect(toList());
        writeFile("permissions", permissionFilePath, permissionsCodec, permissionEntries, true);
    } catch (IOException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
// Callable body: resolves the root path for this config-store version and
// verifies it exists as a directory. Throws VersionDoesNotExistException when
// the version directory is absent (or is not a directory).
@Override public Path call() throws IOException { Path versionRootPath = PathUtils.combinePaths(SimpleHadoopFilesystemConfigStore.this.physicalStoreRoot.toString(), CONFIG_STORE_NAME, this.version); if (SimpleHadoopFilesystemConfigStore.this.fs.isDirectory(versionRootPath)) { return versionRootPath; } throw new VersionDoesNotExistException(getStoreURI(), this.version, String.format("Cannot find specified version under root %s", versionRootPath)); } }
// Deletes every HDFS task-log entry whose modification time precedes `timestamp`.
// A missing log directory is a no-op; a non-directory path is a configuration error.
// After each iteration the thread's interrupt flag is checked so a long sweep can
// be cancelled; the InterruptedException is wrapped in IOException to fit the
// method's declared checked exception. NOTE(review): the interrupt flag is read
// but not re-asserted here — presumably callers treat the wrapped exception as
// the cancellation signal; confirm before relying on the flag upstream.
@Override public void killOlderThan(long timestamp) throws IOException { Path taskLogDir = new Path(config.getDirectory()); FileSystem fs = taskLogDir.getFileSystem(hadoopConfig); if (fs.exists(taskLogDir)) { if (!fs.isDirectory(taskLogDir)) { throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir); } RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(taskLogDir); while (iter.hasNext()) { LocatedFileStatus file = iter.next(); if (file.getModificationTime() < timestamp) { Path p = file.getPath(); log.info("Deleting hdfs task log [%s].", p.toUri().toString()); fs.delete(p, true); } if (Thread.currentThread().isInterrupted()) { throw new IOException( new InterruptedException("Thread interrupted. Couldn't delete all tasklogs.") ); } } } } }
// Fragment of a larger validation method (the enclosing method and the branch's
// closing brace are outside this view): resolves the table's external storage
// location and fails when it does not exist as a directory on its file system.
Path externalLocation = new Path(table.getStorage().getLocation()); FileSystem externalFileSystem = hdfsEnvironment.getFileSystem(hdfsContext, externalLocation); if (!externalFileSystem.isDirectory(externalLocation)) { throw new PrestoException(HIVE_METASTORE_ERROR, "External table location does not exist");
/**
 * Validates a partition's storage location against its table type:
 * managed-table partitions must live exactly in the metastore-computed
 * metadata directory; external-table partitions must point at an existing
 * directory outside the system metadata catalog. Any other table type is
 * rejected as unsupported.
 *
 * @param table     table the partition belongs to
 * @param partition partition whose location is being verified
 */
private void verifiedPartition(Table table, Partition partition) {
    Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
    String tableType = table.getTableType();
    if (tableType.equals(MANAGED_TABLE.name())) {
        Path declaredLocation = new Path(partition.getStorage().getLocation());
        if (!partitionMetadataDirectory.equals(declaredLocation)) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Partition directory must be " + partitionMetadataDirectory);
        }
        return;
    }
    if (tableType.equals(EXTERNAL_TABLE.name())) {
        try {
            Path externalLocation = new Path(partition.getStorage().getLocation());
            FileSystem externalFileSystem = hdfsEnvironment.getFileSystem(hdfsContext, externalLocation);
            if (!externalFileSystem.isDirectory(externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "External partition location does not exist");
            }
            // External data must never live inside the metastore's own tree.
            if (isChildDirectory(catalogDirectory, externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "External partition location can not be inside the system metadata directory");
            }
        } catch (IOException e) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not validate external partition location", e);
        }
        return;
    }
    throw new PrestoException(NOT_SUPPORTED, "Partitions can not be added to " + tableType);
}