/**
 * Get the fully-qualified output file path.
 *
 * @return the fully-qualified output file path
 */
public String getFullyQualifiedOutputFilePath() {
  return this.fs.makeQualified(this.outputFile).toString();
}
/**
 * Return the current user's home directory in this FileSystem.
 * The default implementation returns {@code "/user/$USER/"}.
 */
public Path getHomeDirectory() {
  return this.makeQualified(
      new Path(USER_HOME_PREFIX + "/" + System.getProperty("user.name")));
}
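The snippets above and below all lean on the same behavior: makeQualified resolves a relative path against the FileSystem's working directory and stamps it with the FileSystem's scheme and authority. A minimal sketch, runnable against the local filesystem (no cluster assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Relative paths are resolved against the FileSystem's working directory,
    // then stamped with the FileSystem's scheme (here: file).
    System.out.println(fs.makeQualified(new Path("data/input")));
    // Prints something like: file:/home/<user>/data/input
  }
}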
public static String makeQualifiedPathInHBaseCluster(String inPath) {
  Path path = new Path(inPath);
  path = Path.getPathWithoutSchemeAndAuthority(path);
  // Must be HBase's FS, not the working FS.
  FileSystem fs = HadoopUtil.getFileSystem(path, getCurrentHBaseConfiguration());
  return fs.makeQualified(path).toString();
}
// Fragment of a larger method; elided context is marked with ellipses.
try {
  fullPath = FileSystem.get(conf).makeQualified(new Path(path)).toString();
} catch (IllegalArgumentException e) {
  LOG.info("Call to makeQualified failed on " + path + " " + e.getMessage());
  // ...
}
// ...
Path p = new Path(path);
return getServerNameFromWALDirectoryName(p);
/**
 * Calculate the target filePath of the jar file to be copied on HDFS,
 * given the {@link FileStatus} of a jarFile and the path of the directory that contains the jar.
 */
private Path calculateDestJarFile(FileStatus status, Path jarFileDir) {
  // SNAPSHOT jars should not be shared, as different jobs may be using different versions of them.
  Path baseDir = status.getPath().getName().contains("SNAPSHOT") ? this.unsharedJarsDir : jarFileDir;
  // DistributedCache requires an absolute path, so we need to use makeQualified.
  return new Path(this.fs.makeQualified(baseDir), status.getPath().getName());
}
static String getQualifiedPath(Configuration conf, Path path) throws IOException {
  if (path == null) {
    return null;
  }
  FileSystem fs = path.getFileSystem(conf);
  return fs.makeQualified(path).toString();
}
/**
 * Copy dependency jars from local fs to HDFS.
 */
private void copyDependencyJarsToHdfs() throws IOException {
  if (!this.state.contains(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
    return;
  }
  LocalFileSystem lfs = FileSystem.getLocal(this.conf);
  Path tmpJarFileDir = new Path(this.tmpOutputDir, "_gobblin_compaction_jars");
  this.state.setProp(COMPACTION_JARS, tmpJarFileDir.toString());
  this.fs.delete(tmpJarFileDir, true);
  for (String jarFile : this.state.getPropAsList(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
    for (FileStatus status : lfs.globStatus(new Path(jarFile))) {
      Path tmpJarFile = new Path(this.fs.makeQualified(tmpJarFileDir), status.getPath().getName());
      this.fs.copyFromLocalFile(status.getPath(), tmpJarFile);
      LOG.info(String.format("%s will be added to classpath", tmpJarFile));
    }
  }
}
/**
 * Get the root directory of Trash for the current user when the specified
 * path is deleted.
 *
 * @param path the path whose trash root is to be determined
 * @return the trash root; the default implementation returns {@code /user/$USER/.Trash}
 */
public Path getTrashRoot(Path path) {
  return this.makeQualified(new Path(getHomeDirectory().toUri().getPath(), TRASH_PREFIX));
}
private static void checkDependencies(FileSystem srcFS, Path src, FileSystem dstFS, Path dst)
    throws IOException {
  if (srcFS == dstFS) {
    String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
    String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
    if (dstq.startsWith(srcq)) {
      if (srcq.length() == dstq.length()) {
        throw new IOException("Cannot copy " + src + " to itself.");
      } else {
        throw new IOException("Cannot copy " + src + " to its subdirectory " + dst);
      }
    }
  }
}
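To make the guard above concrete, here is a small hypothetical sketch of the prefix check it relies on; the /data/raw paths are invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyGuardDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // The trailing separator prevents false positives such as /data/raw vs /data/rawer.
    String srcq = fs.makeQualified(new Path("/data/raw")).toString() + Path.SEPARATOR;
    String dstq = fs.makeQualified(new Path("/data/raw/backup")).toString() + Path.SEPARATOR;
    // dstq starts with srcq and is longer, so the copy would land inside its own source:
    System.out.println(dstq.startsWith(srcq)); // true -> "Cannot copy ... to its subdirectory"
  }
}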
/**
 * Create location of Trash directory. Parsed from props at key {@link #TRASH_LOCATION_KEY},
 * defaulting to /home/directory/_GOBBLIN_TRASH.
 *
 * @param fs {@link org.apache.hadoop.fs.FileSystem} where trash should be found.
 * @param props {@link java.util.Properties} containing trash configuration.
 * @param user If the trash location contains the token $USER, the token will be replaced by the value of user.
 * @return {@link org.apache.hadoop.fs.Path} for trash directory.
 * @throws java.io.IOException
 */
protected Path createTrashLocation(FileSystem fs, Properties props, String user) throws IOException {
  Path trashLocation;
  if (props.containsKey(TRASH_LOCATION_KEY)) {
    trashLocation = new Path(props.getProperty(TRASH_LOCATION_KEY).replaceAll("\\$USER", user));
  } else {
    trashLocation = new Path(fs.getHomeDirectory(), DEFAULT_TRASH_DIRECTORY);
    LOG.info("Using default trash location at " + trashLocation);
  }
  if (!trashLocation.isAbsolute()) {
    throw new IllegalArgumentException("Trash location must be absolute. Found " + trashLocation.toString());
  }
  Path qualifiedTrashLocation = fs.makeQualified(trashLocation);
  ensureTrashLocationExists(fs, qualifiedTrashLocation);
  return qualifiedTrashLocation;
}
/** Make sure that a path specifies a FileSystem. */
@Override
public Path makeQualified(Path path) {
  Path fqPath = fs.makeQualified(path);
  // Swap in our scheme if the filtered fs is using a different scheme.
  if (swapScheme != null) {
    try {
      // NOTE: should deal with authority, but too much other stuff is broken
      fqPath = new Path(
          new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null));
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }
  }
  return fqPath;
}
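The URI reconstruction in that snippet is easy to misread; a standalone sketch with hypothetical schemes shows what the three-argument URI constructor preserves:

import java.net.URI;
import java.net.URISyntaxException;

public class SchemeSwapDemo {
  public static void main(String[] args) throws URISyntaxException {
    URI original = new URI("hdfs://namenode:8020/user/alice/data"); // hypothetical
    String swapScheme = "viewfs";                                   // hypothetical
    // Keep the scheme-specific part (authority + path) and replace only the scheme.
    URI swapped = new URI(swapScheme, original.getSchemeSpecificPart(), null);
    System.out.println(swapped); // viewfs://namenode:8020/user/alice/data
  }
}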
/**
 * @return the qualified paths of the given {@link FileStatus}es, joined into one comma-separated String
 * @see FileInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, Path)
 */
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
  StringBuilder joined = new StringBuilder();
  for (FileStatus status : statuses) {
    if (joined.length() > 0) {
      joined.append(',');
    }
    Path path = fs.makeQualified(status.getPath());
    joined.append(StringUtils.escapeString(path.toString()));
  }
  return joined.toString();
}
// Fragment of a larger method; elided context is marked with ellipses.
// ...
  continue;
}
Path path = new Path(pathString);
FileSystem fs = path.getFileSystem(jobConf);
if (ignoreInvalidPath && !fs.exists(path)) {
  continue;
}
final String qualifiedPath = fs.makeQualified(path).toString();
str.append(separator)
    .append(StringUtils.escapeString(qualifiedPath));
@Inject
public HdfsDataSegmentPusher(HdfsDataSegmentPusherConfig config, Configuration hadoopConfig,
    ObjectMapper jsonMapper) {
  this.hadoopConfig = hadoopConfig;
  this.jsonMapper = jsonMapper;
  Path storageDir = new Path(config.getStorageDirectory());
  this.fullyQualifiedStorageDirectory = Suppliers.memoize(
      () -> {
        try {
          return FileSystem.newInstance(storageDir.toUri(), hadoopConfig)
              .makeQualified(storageDir)
              .toUri()
              .toString();
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      });
  log.info("Configured HDFS as deep storage");
}
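The Suppliers.memoize wrapper above defers filesystem resolution until first use and caches the result, so constructing the pusher never touches HDFS. A tiny, hypothetical sketch of that pattern; the resolved URI is invented:

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class MemoizeDemo {
  public static void main(String[] args) {
    Supplier<String> storageDir = Suppliers.memoize(() -> {
      System.out.println("resolving once...");
      return "hdfs://namenode:8020/druid/segments"; // hypothetical resolved URI
    });
    System.out.println(storageDir.get()); // triggers resolution
    System.out.println(storageDir.get()); // cached; no second resolution
  }
}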
/**
 * Convert path to qualified path.
 *
 * @param conf Hive configuration.
 * @param path Path to convert.
 * @return Qualified path
 */
public static String getQualifiedPath(HiveConf conf, Path path) throws HiveException {
  if (path == null) {
    return null;
  }
  try {
    FileSystem fs = path.getFileSystem(conf);
    return fs.makeQualified(path).toString();
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
// Fragment of a larger method; elided context is marked with ellipses.
  stagingPathName = new Path(inputPathName, stagingDir).toString();
} else {
  stagingPathName = // ...
}
// ...
Path path = new Path(
    stagingPathName + "_" + generateExecutionId() + "-" + TaskRunner.getTaskRunnerID());
dir = fileSystem.makeQualified(path);
// ...
    "Cannot create staging directory '" + dir.toString() + "'");
// ...
    "Cannot create staging directory '" + dir.toString() + "': " + e.getMessage(), e);
public int processFile(String fileName) throws IOException {
  Path path = new Path(fileName);
  FileSystem fs;
  if (!path.toUri().isAbsolute()) {
    fs = FileSystem.getLocal(conf);
    path = fs.makeQualified(path);
  } else {
    fs = FileSystem.get(path.toUri(), conf);
  }
  BufferedReader bufferReader = null;
  int rc = 0;
  try {
    bufferReader = new BufferedReader(new InputStreamReader(fs.open(path)));
    rc = processReader(bufferReader);
  } finally {
    IOUtils.closeStream(bufferReader);
  }
  return rc;
}
public void lsr(Path p, List<String> results) throws IOException {
  if (!this.fs.getFileStatus(p).isDirectory()) {
    results.add(p.toString());
  }
  Path qualifiedPath = this.fs.makeQualified(p);
  for (FileStatus status : this.fs.listStatus(p)) {
    if (status.isDirectory()) {
      // Fix for hadoop issue: https://issues.apache.org/jira/browse/HADOOP-12169
      if (!qualifiedPath.equals(status.getPath())) {
        lsr(status.getPath(), results);
      }
    } else {
      results.add(status.getPath().toString());
    }
  }
}
public String getHdfsWorkingDirectory() {
  if (cachedHdfsWorkingDirectory != null) {
    return cachedHdfsWorkingDirectory;
  }
  String root = getOptional("kylin.env.hdfs-working-dir", "/kylin");
  Path path = new Path(root);
  if (!path.isAbsolute()) {
    throw new IllegalArgumentException("kylin.env.hdfs-working-dir must be absolute, but got " + root);
  }
  try {
    FileSystem fs = path.getFileSystem(HadoopUtil.getCurrentConfiguration());
    path = fs.makeQualified(path);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  // Append metadata-url prefix.
  root = new Path(path, StringUtils.replaceChars(getMetadataUrlPrefix(), ':', '-')).toString();
  if (!root.endsWith("/")) {
    root += "/";
  }
  cachedHdfsWorkingDirectory = root;
  if (cachedHdfsWorkingDirectory.startsWith(FILE_SCHEME)) {
    cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace(FILE_SCHEME, "file://");
  } else if (cachedHdfsWorkingDirectory.startsWith(MAPRFS_SCHEME)) {
    cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace(MAPRFS_SCHEME, "maprfs://");
  }
  return cachedHdfsWorkingDirectory;
}
/**
 * Add local non-jar files the job depends on to DistributedCache.
 */
@SuppressWarnings("deprecation")
private void addLocalFiles(Path jobFileDir, String jobFileList, Configuration conf) throws IOException {
  DistributedCache.createSymlink(conf);
  for (String jobFile : SPLITTER.split(jobFileList)) {
    Path srcJobFile = new Path(jobFile);
    // DistributedCache requires absolute path, so we need to use makeQualified.
    Path destJobFile = new Path(this.fs.makeQualified(jobFileDir), srcJobFile.getName());
    // Copy the file from local file system to HDFS
    this.fs.copyFromLocalFile(srcJobFile, destJobFile);
    // Create a URI that is in the form path#symlink
    URI destFileUri = URI.create(destJobFile.toUri().getPath() + "#" + destJobFile.getName());
    LOG.info(String.format("Adding %s to DistributedCache", destFileUri));
    // Finally add the file to DistributedCache with a symlink named after the file name
    DistributedCache.addCacheFile(destFileUri, conf);
  }
}
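The path#symlink convention above makes the cached file appear under the symlink name in each task's working directory. A hedged sketch of the same idea using the non-deprecated Job API rather than DistributedCache directly; the HDFS path and file name are invented:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class CacheFileDemo {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "cache-file-demo");
    // The fragment after '#' becomes the symlink name in each task's working dir.
    job.addCacheFile(URI.create("/user/gobblin/jobs/_files/lookup.dat#lookup.dat"));
  }
}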