String mrDir = cfg.get(MRJobConfig.MAPREDUCE_JOB_DIR);

stagingDir = new Path(new URI(mrDir));

FileSystem fs = job.fileSystem(stagingDir.toUri(), cfg);

if (!fs.exists(stagingDir))
    throw new IgniteCheckedException("Failed to find map-reduce submission " +
        "directory (does not exist): " + stagingDir);

// Copy the whole submission directory down to the local job directory.
if (!FileUtil.copy(fs, stagingDir, jobLocDir, false, cfg))
    throw new IgniteCheckedException("Failed to copy job submission directory " +
        "contents to local file system: " + jobLocDir);

// Expose the job jar on the task classpath.
clsPathUrls.add(jarJobFile.toURI().toURL());
@BeforeClass
public static void createHDFS() {
    try {
        baseDir = new File("./target/localfs/fs_tests").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();

        localFsURI = "file:///" + baseDir + "/";
        localFs = new org.apache.hadoop.fs.Path(localFsURI).getFileSystem(hdConf);
    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}
String parentDirStr = conf.get(LOCAL_DIR_KEY, DEFAULT_LOCAL_DIR) + TMP_JARS_DIR;
synchronized (parentDirLockSet) {
    if (!parentDirLockSet.contains(parentDirStr)) {
        Path parentDir = new Path(parentDirStr);
        FileSystem fs = FileSystem.getLocal(conf);
        fs.delete(parentDir, true); // it's ok if the dir doesn't exist now
        parentDirLockSet.add(parentDirStr);
        if (!fs.mkdirs(parentDir) && !fs.getFileStatus(parentDir).isDirectory()) {
            throw new RuntimeException("Failed to create local dir " + parentDirStr
                + ", CoprocessorClassLoader failed to init");
        }
    }
}

FileSystem fs = pathPattern.getFileSystem(conf);
// Append "*.jar" if a directory is specified.
Path pathPattern1 = fs.isDirectory(pathPattern) ? new Path(pathPattern, "*.jar") : pathPattern;
// Return all files that match the pattern.
FileStatus[] fileStatuses = fs.globStatus(pathPattern1);
if (fileStatuses == null || fileStatuses.length == 0) {
    // Nothing matched the pattern.
    throw new FileNotFoundException(pathPattern1.toString());
} else {
    boolean validFileEncountered = false;
    for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that matches the pattern
        if (fs.isFile(path)) { // only process files, skip directories
            // Copy the jar to a hidden, uniquely named file in the local temp dir.
            File dst = new File(parentDirStr,
                "." + pathPrefix + "." + path.getName() + "." + System.currentTimeMillis() + ".jar");
            fs.copyToLocalFile(path, new Path(dst.toString()));
            dst.deleteOnExit();
            addURL(dst.getCanonicalFile().toURI().toURL());
            validFileEncountered = true;
            // Library jars extracted from inside the archive are registered the same way
            // (the extraction loop is elided in this excerpt): addURL(file.toURI().toURL());
        }
    }
}
private static boolean copy(FileSystem srcFS, FileStatus srcStatus, FileSystem dstFS, Path dst,
        boolean deleteSource, boolean overwrite, Configuration conf) throws IOException {
    Path src = srcStatus.getPath();
    dst = checkDest(src.getName(), dstFS, dst, overwrite);

    if (srcStatus.isDirectory()) {
        checkDependencies(srcFS, src, dstFS, dst);
        if (!dstFS.mkdirs(dst)) {
            return false;
        }
        // Recurse into the directory, copying each child under the new destination.
        FileStatus contents[] = srcFS.listStatus(src);
        for (int i = 0; i < contents.length; i++) {
            copy(srcFS, contents[i], dstFS, new Path(dst, contents[i].getPath().getName()),
                deleteSource, overwrite, conf);
        }
    } else {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = srcFS.open(src);
            out = dstFS.create(dst, overwrite);
            IOUtils.copyBytes(in, out, conf, true);
        } catch (IOException e) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            throw e;
        }
    }
    if (deleteSource) {
        return srcFS.delete(src, true);
    } else {
        return true;
    }
}
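The recursive helper above is what backs Hadoop's public FileUtil.copy overloads. A minimal sketch of calling the public API, assuming hypothetical paths and cluster address (none of these names come from the snippet):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CopyTreeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        Path src = new Path("hdfs://namenode:8020/data/input"); // hypothetical source tree
        Path dst = new Path("file:///tmp/input-copy");          // hypothetical local destination

        FileSystem srcFs = src.getFileSystem(conf);
        FileSystem dstFs = dst.getFileSystem(conf);

        // deleteSource = false, overwrite = true; directories are copied recursively.
        if (!FileUtil.copy(srcFs, src, dstFs, dst, false, true, conf))
            throw new IllegalStateException("Copy failed: " + src + " -> " + dst);
    }
}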
URI uri = (URI)pathObj;

srcPath = new Path(uri);

String locName = srcPath.getName();
File dstPath = new File(jobLocDir.getAbsolutePath(), locName);

clsPathUrls.add(dstPath.toURI().toURL());

FileSystem dstFs = FileSystem.getLocal(cfg);
FileSystem srcFs = job.fileSystem(srcPath.toUri(), cfg);

if (extract) {
    // Local copy of the archive before unpacking (file name reconstructed for this excerpt).
    File archiveFile = new File(jobLocDir.getAbsolutePath(), ".cached-" + locName);

    FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);

    String archiveNameLC = archiveFile.getName().toLowerCase();

    if (archiveNameLC.endsWith(".jar"))
        RunJar.unJar(archiveFile, dstPath);
    else if (archiveNameLC.endsWith(FilePageStoreManager.ZIP_SUFFIX))
        FileUtil.unZip(archiveFile, dstPath);
    else if (archiveNameLC.endsWith(".tar.gz") || archiveNameLC.endsWith(".tgz")
        || archiveNameLC.endsWith(".tar"))
        FileUtil.unTar(archiveFile, dstPath);
    else
        throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
}
else
    FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);

ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
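The same extension-based dispatch, condensed into a standalone sketch. The unpack() helper and the file names are assumptions; RunJar.unJar, FileUtil.unZip and FileUtil.unTar are the real Hadoop entry points:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.RunJar;

public class UnpackExample {
    public static void unpack(File archive, File dstDir) throws IOException {
        String name = archive.getName().toLowerCase();

        if (name.endsWith(".jar"))
            RunJar.unJar(archive, dstDir);   // expand a jar archive
        else if (name.endsWith(".zip"))
            FileUtil.unZip(archive, dstDir); // expand a zip archive
        else if (name.endsWith(".tar.gz") || name.endsWith(".tgz") || name.endsWith(".tar"))
            FileUtil.unTar(archive, dstDir); // expand a (possibly gzipped) tar
        else
            throw new IOException("Unsupported archive type: " + archive);
    }
}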
conf.setVar(ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR));

// Generate the plan file that the child "localtask" JVM will execute.
Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml");
MapredLocalWork plan = getWork();
LOG.info("Generating plan file " + planPath.toString());

OutputStream out = FileSystem.getLocal(conf).create(planPath);
SerializationUtilities.serializePlan(plan, out);
out.close();

String cmdLine = hadoopExec + " jar " + jarCmd + " -localtask -plan " + planPath.toString()
    + " " + isSilent + " " + hiveConfArgs;

cmdLine = cmdLine + " -files " + files;

workDir = ctx.getLocalTmpPath().toUri().getPath();

// Symlink each added file into the work directory of the child process.
for (String f : StringUtils.split(files, ',')) {
    Path p = new Path(f);
    String target = p.toUri().getPath();
    String link = workDir + Path.SEPARATOR + p.getName();
    if (FileUtil.symLink(target, link) != 0) {
        throw new IOException("Cannot link to added file: " + target + " from: " + link);
    }
}
@BeforeClass
public static void setup() throws Exception {
    System.clearProperty("mapred.job.tracker");

    String testDir = System.getProperty("test.tmp.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(System.getProperty("test.warehouse.dir"));

    HiveConf metastoreConf = new HiveConf();
    metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());

    // Run hive metastore server
    MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
    // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on
    // the same server
    warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));

    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());

    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();

    initializeSetup(metastoreConf);

    warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}
private void distributeFiles() {
    try {
        URI[] uris = DistributedCache.getCacheFiles(conf);
        if (uris != null) {
            URI[] outURIs = new URI[uris.length];
            for (int i = 0; i < uris.length; i++) {
                Path path = new Path(uris[i]);
                FileSystem fs = path.getFileSystem(conf);
                if (fs.isFile(path)) {
                    outURIs[i] = uris[i];
                } else {
                    Path mergePath = new Path(path.getParent(), "sparkreadable-" + path.getName());
                    FileUtil.copyMerge(fs, path, fs, mergePath, false, conf, "");
                    outURIs[i] = mergePath.toUri();
                }
                sparkContext.addFile(outURIs[i].toString());
            }
            DistributedCache.setCacheFiles(outURIs, conf);
        }
    } catch (IOException e) {
        throw new RuntimeException("Error retrieving cache files", e);
    }
}
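FileUtil.copyMerge, used above to flatten a cache directory into a single file, concatenates every file under a source directory into one destination file. A minimal sketch with assumed paths (note that copyMerge is deprecated in newer Hadoop releases, which is presumably why the wrapper in the last snippet on this page carries @Deprecated):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CopyMergeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FileSystem localFs = FileSystem.getLocal(conf);

        // Concatenate all files under /out into one local file, keeping the
        // sources (deleteSource = false) and appending no separator string.
        boolean merged = FileUtil.copyMerge(fs, new Path("/out"), localFs,
            new Path("/tmp/out-merged"), false, conf, null);

        if (!merged)
            throw new IllegalStateException("Nothing merged from /out");
    }
}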
private String downloadResource(URI srcUri, String subDir, boolean convertToUnix)
        throws IOException, URISyntaxException {
    LOG.debug("Converting to local {}", srcUri);

    File destinationDir = (subDir == null) ? resourceDir : new File(resourceDir, subDir);
    ensureDirectory(destinationDir);

    File destinationFile = new File(destinationDir, new Path(srcUri.toString()).getName());
    String dest = destinationFile.getCanonicalPath();
    if (destinationFile.exists()) {
        return dest;
    }

    FileSystem fs = FileSystem.get(srcUri, conf);
    fs.copyToLocalFile(new Path(srcUri.toString()), new Path(dest));

    // add "execute" permission to downloaded resource file (needed when loading dll file)
    FileUtil.chmod(dest, "ugo+rx", true);
    return dest;
}
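The same download-then-chmod pattern in isolation: fs.copyToLocalFile pulls the remote file down and FileUtil.chmod marks it executable. The URI and directory below are assumptions, not values from the snippet:

import java.io.File;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class DownloadExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI srcUri = new URI("hdfs://namenode:8020/libs/native.so"); // hypothetical resource

        File destDir = new File("/tmp/resources");
        destDir.mkdirs();
        File destFile = new File(destDir, new Path(srcUri.toString()).getName());

        FileSystem fs = FileSystem.get(srcUri, conf);
        fs.copyToLocalFile(new Path(srcUri.toString()), new Path(destFile.getCanonicalPath()));

        // Grant execute permission, as the snippet does for loadable libraries.
        FileUtil.chmod(destFile.getCanonicalPath(), "ugo+rx", true);
    }
}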
@BeforeClass
public static void setup() throws Exception {
    conf = new Configuration();
    conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS);

    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
    base.mkdirs();
    keystoresDir = new File(BASEDIR).getAbsolutePath();

    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");
    sslConf.addResource("ssl-client.xml");

    // Builder chain reconstructed minimally; the original excerpt was truncated here.
    server = new HttpServer.Builder()
        .setName("test")
        .addEndpoint(new URI("https://localhost"))
        .setConf(conf)
        .keyPassword(HBaseConfiguration.getPassword(sslConf, "ssl.server.keystore.keypassword", null))
        .build();
    server.start();
}
public static void restoreUploadedFiles(FileSystem fs, Path backupDir, HomeFileConf homeFileStore,
        BackupStats backupStats, String hostname) throws IOException {
    // restore uploaded files
    final Path uploadsBackupDir = new Path(backupDir.toUri().getPath(), "uploads");
    FileSystem fs2 = homeFileStore.getFilesystemAndCreatePaths(hostname);
    fs2.delete(homeFileStore.getPath(), true);
    FileUtil.copy(fs, uploadsBackupDir, fs2, homeFileStore.getInnerUploads(), false, false, new Configuration());
    backupStats.files = fs.getContentSummary(backupDir).getFileCount();
}
FlowFile flowFile = session.create(parentFlowFile);
try {
    final String originalFilename = file.getName();
    final Path configuredRootOutputDirPath = processorConfig.getOutputDirectory();
    final Path newFile = new Path(configuredRootOutputDirPath, originalFilename);
    final boolean destinationExists = hdfs.exists(newFile);

    // Resolve a pre-existing destination file according to the configured strategy.
    if (destinationExists) {
        switch (processorConfig.getConflictResolution()) {
            case REPLACE_RESOLUTION:
                if (hdfs.delete(file, false)) {
                    getLogger().info("deleted {} in order to replace with the contents of {}",
                        new Object[]{file, flowFile});
                }
                break;
            // other resolution strategies elided in this excerpt
        }
    }

    // The output path must already be a directory.
    if (!hdfs.getFileStatus(configuredRootOutputDirPath).isDirectory()) {
        throw new IOException(configuredRootOutputDirPath.toString() + " already exists and is not a directory");
    }

    boolean moved = false;
    for (int i = 0; i < 10; i++) { // retry loop, as in the surrounding processor code
        if (FileUtil.copy(hdfs, file, hdfs, newFile, false, conf)) {
            moved = true;
            break; // copy was successful
        }
    }
FileSystem destFS = dest.getFileSystem(conf);
FileSystem srcFs = FileSystem.getLocal(conf);

if (src != null && !checkPreExisting(srcFs, src, dest, conf)) {
    // Copy the source to the destination; do not overwrite an existing resource.
    String srcStr = src.toString();
    LOG.info("Localizing resource because it does not exist: " + srcStr + " to dest: " + dest);

    Object notifierNew = new Object(),
        notifierOld = copyNotifiers.putIfAbsent(srcStr, notifierNew),
        notifier = (notifierOld == null) ? notifierNew : notifierOld;

    // A scheme on the source URI means it is not a local file.
    if (src.toUri().getScheme() != null) {
        FileUtil.copy(src.getFileSystem(conf), src, destFS, dest, false, false, conf);
    } else {
        destFS.copyFromLocalFile(false, false, src, dest);
    }
}
URI workingURI = workingDirFs.getUri();
URI rootURI = fs.getUri();

// A rename only works within one filesystem; if the working dir lives elsewhere,
// or the rename itself fails, fall back to copying the snapshot into place.
if ((!workingURI.getScheme().equals(rootURI.getScheme())
        || !workingURI.getAuthority().equals(rootURI.getAuthority())
        || workingURI.getUserInfo() == null
        || !workingURI.getUserInfo().equals(rootURI.getUserInfo())
        || !fs.rename(workingDir, snapshotDir))
        && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, this.conf)) {
    throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
        + ") to completed directory(" + snapshotDir + ").");
}
@SuppressWarnings("deprecation") @Override public void createSymlink(Path target, Path link, boolean createParent) throws IOException { if (!FileSystem.areSymlinksEnabled()) { throw new UnsupportedOperationException("Symlinks not supported"); } final String targetScheme = target.toUri().getScheme(); if (targetScheme != null && !"file".equals(targetScheme)) { throw new IOException("Unable to create symlink to non-local file "+ "system: "+target.toString()); } if (createParent) { mkdirs(link.getParent()); } // NB: Use createSymbolicLink in java.nio.file.Path once available int result = FileUtil.symLink(target.toString(), makeAbsolute(link).toString()); if (result != 0) { throw new IOException("Error " + result + " creating symlink " + link + " to " + target); } }
@Override
public String prepareBulkLoad(final byte[] family, final String srcPath, boolean copyFile)
        throws IOException {
    Path p = new Path(srcPath);
    Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));

    if (srcFs == null) {
        srcFs = FileSystem.newInstance(p.toUri(), conf);
    }

    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
        LOG.debug("Bulk-load file " + srcPath + " is on different filesystem than "
            + "the destination filesystem. Copying file over to destination staging dir.");
        FileUtil.copy(srcFs, p, fs, stageP, false, conf);
    } else if (copyFile) {
        LOG.debug("Bulk-load file " + srcPath + " is copied to destination staging dir.");
        FileUtil.copy(srcFs, p, fs, stageP, false, conf);
    } else {
        LOG.debug("Moving " + p + " to " + stageP);
        FileStatus origFileStatus = fs.getFileStatus(p);
        origPermissions.put(srcPath, origFileStatus.getPermission());
        if (!fs.rename(p, stageP)) {
            throw new IOException("Failed to move HFile: " + p + " to " + stageP);
        }
    }
    return stageP.toString();
}
private void migrateOldLayout() throws IOException {
    FileStatus[] sliceFiles = fileSystem.listStatus(basePath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            // Select the old-layout slice files; the exact predicate was elided in the
            // original excerpt, so everything except the v1 index file is accepted here.
            return !V1_INDEX_NAME.equals(path.getName());
        }
    });

    Path indexFile = new Path(basePath, V1_INDEX_NAME);
    if (fileSystem.exists(indexFile) && sliceFiles.length > 0) {
        // old layout: stage the index and slice files into a temp dir, promote it
        // to a version dir, then remove the old files
        final long version = System.currentTimeMillis();
        Path tempDir = new Path(basePath, "tmp_" + VERSION_PREFIX + version);
        Path versionDir = getVersionDir(version);

        fileSystem.mkdirs(tempDir);
        FileUtil.copy(fileSystem, indexFile, fileSystem, tempDir, false, conf);
        for (FileStatus sliceFile : sliceFiles) {
            FileUtil.copy(fileSystem, sliceFile.getPath(), fileSystem, tempDir, false, conf);
        }
        fileSystem.rename(tempDir, versionDir);

        fileSystem.delete(indexFile, false);
        for (FileStatus sliceFile : sliceFiles) {
            fileSystem.delete(sliceFile.getPath(), true);
        }
    }
}
private static List<Path> copyFiles(FileSystem srcFS, Path src, FileSystem dstFS, Path dst,
        Configuration conf, ExecutorService pool, List<Future<Void>> futures) throws IOException {
    List<Path> traversedPaths = new ArrayList<>();
    traversedPaths.add(dst);

    FileStatus currentFileStatus = srcFS.getFileStatus(src);
    if (currentFileStatus.isDirectory()) {
        if (!dstFS.mkdirs(dst)) {
            throw new IOException("create dir failed: " + dst);
        }
        FileStatus[] subPaths = srcFS.listStatus(src);
        for (FileStatus subPath : subPaths) {
            traversedPaths.addAll(copyFiles(srcFS, subPath.getPath(), dstFS,
                new Path(dst, subPath.getPath().getName()), conf, pool, futures));
        }
    } else {
        Future<Void> future = pool.submit(() -> {
            FileUtil.copy(srcFS, src, dstFS, dst, false, false, conf);
            return null;
        });
        futures.add(future);
    }
    return traversedPaths;
}
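A sketch of driving the helper above: the caller owns the pool and the futures list, so it can block on each future to surface copy failures. Names mirror the snippet; the pool size is an assumption:

// Assuming copyFiles(...) from the snippet is in scope, along with srcFS/src/dstFS/dst/conf.
ExecutorService pool = Executors.newFixedThreadPool(4); // assumed parallelism
List<Future<Void>> futures = new ArrayList<>();
try {
    List<Path> traversed = copyFiles(srcFS, src, dstFS, dst, conf, pool, futures);
    for (Future<Void> f : futures) {
        f.get(); // rethrows (wrapped) any IOException from an individual file copy
    }
} finally {
    pool.shutdown();
}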
/**
 * Merge all files in <code>srcDir</code> into the local <code>dstFile</code>, retrying on failure.
 */
@Deprecated
public static void copyMergeToLocal(String srcDir, String dstFile, int numTries, long delayBetweenTries)
        throws IOException {
    Configuration conf = new Configuration();
    FileSystem hdfs = getFS();
    FileSystem localfs = FileSystem.getLocal(conf);

    while (numTries-- > 0) {
        if (FileUtil.copyMerge(hdfs, new Path(srcDir), localfs, new Path(dstFile), false, conf, null)) {
            return;
        }
        try {
            Thread.sleep(delayBetweenTries);
        } catch (InterruptedException ie) {
            throw new RuntimeException(ie);
        }
    }

    throw new IOException("Could not copyMerge from \"" + srcDir + "\" to \"" + dstFile + "\"!");
}
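Usage is a single call; with the arguments below the helper attempts the merge three times, five seconds apart, before giving up. The paths are hypothetical:

// Merge a job's output directory into one local file, retrying transient failures.
copyMergeToLocal("/jobs/run-42/output", "/tmp/run-42-output.txt", 3, 5000L);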