@Override
public boolean create(String storeName) throws IOException {
    Path storePath = new Path(this.storeRootDir, storeName);
    return this.fs.exists(storePath) || this.fs.mkdirs(storePath, new FsPermission((short) 0755));
}
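The snippets in this list construct FsPermission three different ways: from a short octal literal (as above), from an octal string (as in the Hive snippets below), and from FsAction triples (as in the Gobblin snippet). A minimal sketch showing the three forms are equivalent; the class name is illustrative:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionForms {
    public static void main(String[] args) {
        // All three express rwxr-xr-x (0755); note the short cast on the octal literal.
        FsPermission fromOctal = new FsPermission((short) 0755);
        FsPermission fromString = new FsPermission("755");
        FsPermission fromActions =
                new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);

        System.out.println(fromOctal.equals(fromString));   // true
        System.out.println(fromString.equals(fromActions)); // true
    }
}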
@Override
public void setMode(String path, short mode) throws IOException {
    FileSystem hdfs = getFs();
    try {
        FileStatus fileStatus = hdfs.getFileStatus(new Path(path));
        hdfs.setPermission(fileStatus.getPath(), new FsPermission(mode));
    } catch (IOException e) {
        LOG.warn("Failed to set permission for {} with perm {} : {}", path, mode, e.getMessage());
        throw e;
    }
}
public HdfsBlobStoreImpl(Path path, Map<String, Object> conf, Configuration hconf) throws IOException {
    LOG.debug("Blob store based in {}", path);
    _fullPath = path;
    _hadoopConf = hconf;
    _fs = path.getFileSystem(_hadoopConf);
    if (!_fs.exists(_fullPath)) {
        FsPermission perms = new FsPermission(BLOBSTORE_DIR_PERMISSION);
        boolean success = _fs.mkdirs(_fullPath, perms);
        if (!success) {
            throw new IOException("Error creating blobstore directory: " + _fullPath);
        }
    }
    Object shouldCleanup = conf.get(Config.BLOBSTORE_CLEANUP_ENABLE);
    if (ObjectReader.getBoolean(shouldCleanup, false)) {
        LOG.debug("Starting hdfs blobstore cleaner");
        TimerTask cleanup = new TimerTask() {
            @Override
            public void run() {
                try {
                    fullCleanup(FULL_CLEANUP_FREQ);
                } catch (IOException e) {
                    LOG.error("Error trying to cleanup", e);
                }
            }
        };
        timer = new Timer("HdfsBlobStore cleanup thread", true);
        timer.scheduleAtFixedRate(cleanup, 0, FULL_CLEANUP_FREQ);
    }
}
private static FileOutputStream insecureCreateForWrite(File f, int permissions) throws IOException {
    // If we can't do real security, do a racy exists check followed by an
    // open and chmod
    if (f.exists()) {
        throw new AlreadyExistsException("File " + f + " already exists");
    }
    FileOutputStream fos = new FileOutputStream(f);
    boolean success = false;
    try {
        rawFilesystem.setPermission(new Path(f.getAbsolutePath()), new FsPermission((short) permissions));
        success = true;
        return fos;
    } finally {
        if (!success) {
            fos.close();
        }
    }
}
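The comment in insecureCreateForWrite admits the exists/create/chmod sequence is racy. For the purely local case, java.nio.file can create the file and apply permissions in a single call that fails if the file already exists. A hedged sketch under that assumption (local POSIX filesystem only, names illustrative; the created mode is still subject to the process umask):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class AtomicLocalCreate {
    // Files.createFile throws FileAlreadyExistsException rather than racing,
    // and the permission attribute is applied at creation time.
    static OutputStream createForWrite(Path file) throws IOException {
        Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rw-------");
        FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
        Files.createFile(file, attr);
        return Files.newOutputStream(file);
    }
}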
private Path createScratchDir() throws IOException {
    Path parent = new Path(SessionState.get().getHdfsScratchDirURIString(), SPARK_DIR);
    Path sparkDir = new Path(parent, sessionId);
    FileSystem fs = sparkDir.getFileSystem(conf);
    FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION));
    fs.mkdirs(sparkDir, fsPermission);
    fs.deleteOnExit(sparkDir);
    return sparkDir;
}
static void createPath(HiveConf conf, Path path, String permission, boolean isLocal, boolean isCleanUp) throws IOException {
    FsPermission fsPermission = new FsPermission(permission);
    FileSystem fs;
    if (isLocal) {
        fs = FileSystem.getLocal(conf);
    } else {
        fs = path.getFileSystem(conf);
    }
    if (!fs.mkdirs(path, fsPermission)) {
        throw new IOException("Failed to create directory " + path + " on fs " + fs.getUri());
    }
}
/**
 * Persists a *.metadata file to a specific directory in HDFS.
 *
 * @param directoryPath where to write the metadata file.
 * @param outputFs {@link org.apache.hadoop.fs.FileSystem} where to write the file
 * @param metadataFileName name of the file (including extension)
 * @param metadata {@link voldemort.store.readonly.ReadOnlyStorageMetadata} to persist on HDFS
 * @throws IOException if the FileSystem operations fail
 */
private void writeMetadataFile(Path directoryPath, FileSystem outputFs, String metadataFileName, ReadOnlyStorageMetadata metadata) throws IOException {
    Path metadataPath = new Path(directoryPath, metadataFileName);
    FSDataOutputStream metadataStream = outputFs.create(metadataPath);
    outputFs.setPermission(metadataPath, new FsPermission(HADOOP_FILE_PERMISSION));
    metadataStream.write(metadata.toJsonString().getBytes());
    metadataStream.flush();
    metadataStream.close();
}
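writeMetadataFile calls setPermission immediately after create rather than passing the permission to create. On HDFS that distinction matters: permissions handed to create and mkdirs are masked with the client umask (fs.permissions.umask-mode), while setPermission applies the bits verbatim. A minimal sketch of the pattern; paths and the permission value are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ExactPermissions {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FsPermission perm = new FsPermission("755");

        // Create, then chmod: the final bits are exactly 755, umask notwithstanding.
        Path file = new Path("/tmp/exact-perms-demo");
        fs.create(file).close();
        fs.setPermission(file, perm);

        // The static FileSystem.create(fs, path, perm) helper, used in the
        // JStorm snippet further down, wraps this same create-then-setPermission sequence.
        Path file2 = new Path("/tmp/exact-perms-demo-2");
        FileSystem.create(fs, file2, perm).close();
    }
}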
/**
 * Create a given path if it doesn't exist.
 *
 * @throws IOException
 */
private static void createPath(HiveConf conf, Path path, String permission, boolean isLocal, boolean isCleanUp) throws IOException {
    FsPermission fsPermission = new FsPermission(permission);
    FileSystem fs;
    if (isLocal) {
        fs = FileSystem.getLocal(conf);
    } else {
        fs = path.getFileSystem(conf);
    }
    if (!fs.exists(path)) {
        fs.mkdirs(path, fsPermission);
        String dirType = isLocal ? "local" : "HDFS";
        LOG.info("Created " + dirType + " directory: " + path.toString());
    }
    if (isCleanUp) {
        fs.deleteOnExit(path);
    }
}
private void addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId,
        Map<String, LocalResource> localResources, String resources) throws IOException {
    String suffix = jstormClientContext.appName + JOYConstants.BACKLASH + appId + JOYConstants.BACKLASH + fileDstPath;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (fileSrcPath == null) {
        FSDataOutputStream ostream = null;
        try {
            ostream = FileSystem.create(fs, dst, new FsPermission(JOYConstants.FS_PERMISSION));
            ostream.writeUTF(resources);
        } finally {
            IOUtils.closeQuietly(ostream);
        }
    } else {
        fs.copyFromLocalFile(new Path(fileSrcPath), dst);
    }
    FileStatus scFileStatus = fs.getFileStatus(dst);
    LocalResource scRsrc = LocalResource.newInstance(
            ConverterUtils.getYarnUrlFromURI(dst.toUri()),
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
            scFileStatus.getLen(), scFileStatus.getModificationTime());
    localResources.put(fileDstPath, scRsrc);
}
/**
 * createTezDir creates a temporary directory in the scratchDir folder to
 * be used with Tez. Assumes scratchDir exists.
 */
private Path createTezDir(String sessionId) throws IOException {
    // tez needs its own scratch dir (per session)
    Path tezDir = new Path(SessionState.get().getHdfsScratchDirURIString(), TEZ_DIR);
    tezDir = new Path(tezDir, sessionId);
    FileSystem fs = tezDir.getFileSystem(conf);
    FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION));
    fs.mkdirs(tezDir, fsPermission);
    // Make sure the path is normalized (we expect validation to pass since we just created it).
    tezDir = DagUtils.validateTargetDir(tezDir, conf).getPath();
    // Directory removal will be handled by cleanup at the SessionState level.
    return tezDir;
}
FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
if (!fs.exists(p)) {
    if (isSecurityEnabled) {
        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
            throw new IOException("HBase directory '" + p + "' creation failure.");
        }
    } else {
        if (!fs.mkdirs(p)) {
            throw new IOException("HBase directory '" + p + "' creation failure.");
        }
    }
}
octalPerms = octalPerms.substring(len - 4, len);
FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8));
String owner = tokens.nextToken();
String group = tokens.nextToken();
target = target.substring(1, target.length() - 1);
if (!target.isEmpty()) {
    symlink = new Path(target);
}
Path dirPath = new Path(scheme, authority, scratchDir + "-" + TaskRunner.getTaskRunnerID());
if (mkdir) {
    try {
        FileSystem fs = dirPath.getFileSystem(conf);
        dirPath = new Path(fs.makeQualified(dirPath).toString());
        FsPermission fsPermission = new FsPermission(scratchDirPermission);
        if (!fs.mkdirs(dirPath, fsPermission)) {
            throw new RuntimeException("Cannot make directory: " + dirPath.toString());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Create the output folder and optionally set ownership.
 */
private void createOutputPath(final Path path) throws IOException {
    if (filesUser == null && filesGroup == null) {
        outputFs.mkdirs(path);
    } else {
        Path parent = path.getParent();
        if (!outputFs.exists(parent) && !parent.isRoot()) {
            createOutputPath(parent);
        }
        outputFs.mkdirs(path);
        if (filesUser != null || filesGroup != null) {
            // override the owner when non-null user/group is specified
            outputFs.setOwner(path, filesUser, filesGroup);
        }
        if (filesMode > 0) {
            outputFs.setPermission(path, new FsPermission(filesMode));
        }
    }
}
for (Config retainedConfig : retainedConfigs) {
    Path fullFilePath = new Path(testTempDirPath, PathUtils.withoutLeadingSeparator(
            new Path(retainedConfig.getString(TEST_DATA_PATH_LOCAL_KEY))));
    Assert.assertTrue(this.fs.exists(fullFilePath));
    Assert.assertEquals(this.fs.getFileStatus(fullFilePath).getPermission(),
            new FsPermission(permissionsConfig.getString(TEST_DATA_PERMISSIONS_KEY)),
            String.format("Permissions check failed for %s", fullFilePath));
}
/**
 * Moves a copied path into a persistent location managed by gobblin-distcp. This method is used when an already
 * copied file cannot be successfully published. In future runs, instead of re-copying the file, distcp will use the
 * persisted file.
 *
 * @param state {@link State} containing job information.
 * @param file {@link org.apache.gobblin.data.management.copy.CopyableFile} from which the input {@link Path} originated.
 * @param path {@link Path} to persist.
 * @return true if the persist was successful.
 * @throws IOException
 */
public boolean persistFile(State state, CopyableFile file, Path path) throws IOException {
    if (!this.persistDir.isPresent()) {
        return false;
    }
    String guid = computeGuid(state, file);
    Path guidPath = new Path(this.persistDir.get(), guid);
    if (!this.fs.exists(guidPath)) {
        this.fs.mkdirs(guidPath, new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));
    }
    Path targetPath = new Path(guidPath, shortenPathName(file.getOrigin().getPath(), 250 - guid.length()));
    log.info(String.format("Persisting file %s with guid %s to location %s.", path, guid, targetPath));
    if (this.fs.rename(path, targetPath)) {
        this.fs.setTimes(targetPath, System.currentTimeMillis(), -1);
        return true;
    }
    return false;
}
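persistFile writes its permission as an FsAction triple; (ALL, READ, NONE) is octal 740, i.e. rwxr-----. A short sketch of round-tripping between the two notations; the class name is illustrative:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ActionTriples {
    public static void main(String[] args) {
        FsPermission p = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
        System.out.println(Integer.toOctalString(p.toShort())); // 740
        System.out.println(p);                                  // rwxr-----
        System.out.println(p.getGroupAction());                 // READ
    }
}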
@Override
public OutputStream getOutputStream() throws IOException {
    checkIsNotTmp();
    OutputStream out = null;
    FsPermission fileperms = new FsPermission(BLOBSTORE_FILE_PERMISSION);
    try {
        out = _fs.create(_path, (short) this.getMetadata().get_replication_factor());
        _fs.setPermission(_path, fileperms);
        _fs.setReplication(_path, (short) this.getMetadata().get_replication_factor());
    } catch (IOException e) {
        // Try to create the parent directory, may not work
        FsPermission dirperms = new FsPermission(HdfsBlobStoreImpl.BLOBSTORE_DIR_PERMISSION);
        if (!_fs.mkdirs(_path.getParent(), dirperms)) {
            LOG.warn("error creating parent dir: " + _path.getParent());
        }
        out = _fs.create(_path, (short) this.getMetadata().get_replication_factor());
        // apply the file permission here, not the directory permission
        _fs.setPermission(_path, fileperms);
        _fs.setReplication(_path, (short) this.getMetadata().get_replication_factor());
    }
    if (out == null) {
        throw new IOException("Error in creating: " + _path);
    }
    return out;
}
this.checkSumDigestValue[chunkId] = CheckSum.getInstance(checkSumType);
this.position[chunkId] = 0;
this.taskIndexFileName[chunkId] = new Path(FileOutputFormat.getOutputPath(conf),
        getStoreName() + "." + Integer.toString(chunkId) + "_" + this.taskId + INDEX_FILE_EXTENSION + fileExtension);
this.taskValueFileName[chunkId] = new Path(FileOutputFormat.getOutputPath(conf),
        getStoreName() + "." + Integer.toString(chunkId) + "_" + this.taskId + DATA_FILE_EXTENSION + fileExtension);
fs.setPermission(this.taskIndexFileName[chunkId], new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
logger.info("Setting permission to 755 for " + this.taskIndexFileName[chunkId]);
fs.setPermission(this.taskValueFileName[chunkId], new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
logger.info("Setting permission to 755 for " + this.taskValueFileName[chunkId]);
private QueryResultsCache(HiveConf configuration) throws IOException {
    this.conf = configuration;

    // Set up cache directory
    Path rootCacheDir = new Path(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_DIRECTORY));
    LOG.info("Initializing query results cache at {}", rootCacheDir);
    Utilities.ensurePathIsWritable(rootCacheDir, conf);

    String currentCacheDirName = "results-" + UUID.randomUUID().toString();
    cacheDirPath = new Path(rootCacheDir, currentCacheDirName);
    FileSystem fs = cacheDirPath.getFileSystem(conf);
    FsPermission fsPermission = new FsPermission("700");
    fs.mkdirs(cacheDirPath, fsPermission);

    // Create non-existent path for 0-row results
    zeroRowsPath = new Path(cacheDirPath, "dummy_zero_rows");

    // Results cache directory should be cleaned up at process termination.
    fs.deleteOnExit(cacheDirPath);

    maxCacheSize = conf.getLongVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_MAX_SIZE);
    maxEntrySize = conf.getLongVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE);
    maxEntryLifetime = conf.getTimeVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME, TimeUnit.MILLISECONDS);

    LOG.info("Query results cache: cacheDirectory {}, maxCacheSize {}, maxEntrySize {}, maxEntryLifetime {}",
            cacheDirPath, maxCacheSize, maxEntrySize, maxEntryLifetime);
}
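Several snippets here (the Spark and Tez scratch dirs, the results cache above) pair mkdirs with fs.deleteOnExit. Worth noting: deleteOnExit is tracked client-side, per FileSystem instance, and the deletes run when that FileSystem is closed or the JVM shuts down, not on any server-side timer. A minimal sketch; the path is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path scratch = new Path("/tmp/scratch-demo");
        fs.mkdirs(scratch);
        fs.deleteOnExit(scratch);        // registered with this FileSystem object only
        fs.cancelDeleteOnExit(scratch);  // can be unregistered before close()
        fs.close();                      // any still-registered paths are deleted here
    }
}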