FsPermission writableHDFSDirPermission = new FsPermission((short) 00733);
FileSystem fs = rootHDFSDirPath.getFileSystem(conf);
if (!fs.exists(rootHDFSDirPath)) {
  synchronized (ROOT_HDFS_DIR_LOCK) {
    if (!fs.exists(rootHDFSDirPath)) {
      Utilities.createDirsWithPermission(conf, rootHDFSDirPath, writableHDFSDirPermission, true);
    }
  }
}
FsPermission currentHDFSDirPermission = fs.getFileStatus(rootHDFSDirPath).getPermission();
if (rootHDFSDirPath.toUri() != null) {
  String schema = rootHDFSDirPath.toUri().getScheme();
  LOG.debug("HDFS dir: " + rootHDFSDirPath + " with schema " + schema + ", permission: "
      + currentHDFSDirPermission);
}
// The dir may pre-exist with tighter permissions; require every bit of 00733 to be set.
if ((currentHDFSDirPermission.toShort() & writableHDFSDirPermission.toShort())
    != writableHDFSDirPermission.toShort()) {
  throw new RuntimeException("The dir: " + rootHDFSDirPath
      + " on HDFS should be writable. Current permissions are: " + currentHDFSDirPermission);
}
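// Illustrative sketch (not part of the excerpt above): the writability test used there,
// (current & required) == required, passes only when every requested permission bit is
// present in the current permissions. With a required mask of 00733:
short required = (short) 00733;
assert ((short) 00777 & required) == required; // 0777 contains all of 0733's bits
assert ((short) 00711 & required) != required; // 0711 lacks the group/other write bits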
public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action,
    String user, List<String> groups) throws IOException, AccessControlException {
  if (groups == null) {
    groups = emptyGroups;
  }
  String superGroupName = getSuperGroupName(fs.getConf());
  if (userBelongsToSuperGroup(superGroupName, groups)) {
    LOG.info("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". "
        + "Permission granted for action: " + action + ".");
    return;
  }
  final FsPermission dirPerms = stat.getPermission();
  final String grp = stat.getGroup();
  if (user.equals(stat.getOwner())) {
    if (dirPerms.getUserAction().implies(action)) {
      return;
    }
  } else if (groups.contains(grp)) {
    if (dirPerms.getGroupAction().implies(action)) {
      return;
    }
  } else if (dirPerms.getOtherAction().implies(action)) {
    return;
  }
  throw new AccessControlException("action " + action + " not permitted on path "
      + stat.getPath() + " for user " + user);
}
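// Hypothetical usage of checkFileAccess above; the user name, group, and path are
// illustrative only, not taken from the source.
FileStatus status = fs.getFileStatus(new Path("/data/warehouse"));
try {
  checkFileAccess(fs, status, FsAction.WRITE, "alice", Collections.singletonList("analytics"));
  // permitted: safe to proceed with the write
} catch (AccessControlException ace) {
  // denied: fail early with a clear error rather than partway through a job
  throw new IOException("staging dir is not writable for alice", ace);
}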
FileSystem fs = p.getFileSystem(conf);
FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
if (!fs.exists(p)) {
  if (isSecurityEnabled) {
    if (!fs.mkdirs(p, secureRootSubDirPerms)) {
      throw new IOException("HBase directory '" + p + "' creation failure.");
    }
  } else {
    if (!fs.mkdirs(p)) {
      throw new IOException("HBase directory '" + p + "' creation failure.");
    }
  }
} else if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
  // The directory exists with unexpected permissions: warn, then correct them.
  LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
      + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
      + ", expecting " + dirPerms + ". Automatically setting the permissions. "
      + "You can change the permissions by setting \"" + dirPermsConfName + "\" in hbase-site.xml ");
  fs.setPermission(p, dirPerms);
}
static FsPermission addExecutePermissionToOwner(FsPermission fsPermission) {
  FsAction newOwnerAction = fsPermission.getUserAction().or(FsAction.EXECUTE);
  return new FsPermission(newOwnerAction, fsPermission.getGroupAction(),
      fsPermission.getOtherAction());
}
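// Quick check of addExecutePermissionToOwner above (values illustrative): OR-ing EXECUTE
// into the owner bits turns rw-r--r-- (0644) into rwxr--r-- (0744); group and other bits
// are untouched.
FsPermission before = new FsPermission((short) 0644);
FsPermission after = addExecutePermissionToOwner(before);
assert after.toShort() == (short) 0744;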
/**
 * Create from unmasked mode and umask.
 *
 * If the mode is already an FsCreateModes object, return it.
 */
public static FsPermission applyUMask(FsPermission mode, FsPermission umask) {
  if (mode.getUnmasked() != null) {
    return mode;
  }
  return create(mode.applyUMask(umask), mode);
}
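// Worked example for the umask application above: a umask clears bits, i.e.
// masked = mode & ~umask, so requesting 0777 under the common umask 022 yields 0755.
FsPermission mode = new FsPermission((short) 0777);
FsPermission umask = new FsPermission((short) 0022);
assert mode.applyUMask(umask).toShort() == (short) 0755;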
/**
 * Deserializes a {@link FsPermission} object that should be used when a {@link DataWriter}
 * is creating directories.
 */
public static FsPermission deserializeWriterDirPermissions(State state, int numBranches, int branchId) {
  return new FsPermission(state.getPropAsShortWithRadix(
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DIR_PERMISSIONS, numBranches, branchId),
      FsPermission.getDefault().toShort(), ConfigurationKeys.PERMISSION_PARSING_RADIX));
}
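// Why the parsing radix matters above (sketch; values illustrative): permission strings
// such as "750" are octal, so they must be parsed with radix 8, not 10.
short bits = Short.parseShort("750", 8);        // 0750, not decimal 750
FsPermission dirPerms = new FsPermission(bits); // rwxr-x---
assert dirPerms.toString().equals("rwxr-x---");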
@Test(enabled = false)
public void testCreate7() throws IOException, URISyntaxException {
  HDFSRoot hdfsRoot = new HDFSRoot("/tmp/create");
  MetricsFileSystemInstrumentation fs = (MetricsFileSystemInstrumentation)
      FileSystem.get(new URI(instrumentedURI), new Configuration());
  FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ);
  Path newFile = new Path("/tmp/create/newFile");
  FSDataOutputStream fstream = fs.create(newFile, permission, true, 100, (short) 2, 1048576, null);
  Assert.assertEquals(fs.createTimer.getCount(), 1);
  fstream.close();
  hdfsRoot.cleanupRoot();
}
srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true);
FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
LOG.debug("inputFs=" + inputFs.getUri().toString() + " inputRoot=" + inputRoot);
Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true);
FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString());
boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false)
    || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
outputFs.setPermission(new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE),
    new FsPermission((short) filesMode));
// Validate each cacheFile entry of the form "remotePath#linkName".
Path pathRemote;
if (path.contains("#")) {
  String[] paths = StringUtils.split(path, "#");
  if (paths.length != 2) {
    throw new RuntimeException("Error cacheFile path format " + path);
  }
  pathRemote = new Path(paths[0]);
} else {
  pathRemote = new Path(path);
}
if (!pathRemote.getFileSystem(conf).exists(pathRemote)) {
  throw new IOException("cacheFile path " + pathRemote + " does not exist!");
}

// The same existence check is applied to each cacheArchive entry.
pathRemote = new Path(path);
if (!pathRemote.getFileSystem(conf).exists(pathRemote)) {
  throw new IOException("cacheArchive path " + pathRemote + " does not exist!");
}

// Write the job configuration to the staging directory and expose it to the AM.
Path jobConfPath = Utilities
    .getRemotePath(conf, applicationId, XLearningConstants.XLEARNING_JOB_CONFIGURATION);
FSDataOutputStream out = FileSystem.create(jobConfPath.getFileSystem(conf), jobConfPath,
    new FsPermission(JOB_FILE_PERMISSION));
conf.writeXml(out);
out.close();
Map<String, LocalResource> localResources = new HashMap<>();
localResources.put(XLearningConstants.XLEARNING_JOB_CONFIGURATION,
    // (assumed) the local-resource construction for jobConfPath is truncated in the source
    Utilities.createApplicationResource(jobConfPath.getFileSystem(conf), jobConfPath, LocalResourceType.FILE));
appMasterEnv.put(XLearningConstants.Environment.XLEARNING_STAGING_LOCATION.toString(), // (assumed key)
    Utilities.getRemotePath(conf, applicationId, "").toString());
appMasterEnv.put(XLearningConstants.Environment.APP_JAR_LOCATION.toString(), appJarDst.toUri().toString());
appMasterEnv.put(XLearningConstants.Environment.XLEARNING_JOB_CONF_LOCATION.toString(), jobConfPath.toString());
Path destination = new Path(new Path(new Path("/", destinationExistingToken),
    destinationAdditionalTokens), fileName);
Path destinationWithoutLeadingSeparator =
    new Path(new Path(destinationExistingToken, destinationAdditionalTokens), fileName);
Path originFile = new Path(tmpPath, fileName);
this.fs.createNewFile(originFile);
Path stagingDir = new Path(tmpPath, "staging");
this.fs.mkdirs(stagingDir);
Path outputDir = new Path(tmpPath, "output");
this.fs.mkdirs(outputDir);
FileStatus status = this.fs.getFileStatus(originFile);
FsPermission readWrite = new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
FsPermission dirReadWrite = new FsPermission(FsAction.ALL, FsAction.READ_WRITE, FsAction.READ_WRITE);
OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(), readWrite);
List<OwnerAndPermission> ancestorOwnerAndPermissions = Lists.newArrayList();
ancestorOwnerAndPermissions.add(ownerAndPermission);
// The call wrapping this builder chain is truncated in the source; the CopyableFile
// builder receiving destinationOwnerAndPermission is an assumed reconstruction:
CopyableFile copyableFile = CopyableFile.fromOriginAndDestination(this.fs, status, destination,
    CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties)
        .publishDir(new Path("/target"))
        .preserve(PreserveAttributes.fromMnemonicString(""))
        .build())
    .destinationOwnerAndPermission(ownerAndPermission)
    .ancestorsOwnerAndPermission(ancestorOwnerAndPermissions)
    .build();
this.fs.mkdirs(existingOutputPath);
FileStatus fileStatus = this.fs.getFileStatus(existingOutputPath);
FsPermission existingPathPermission = fileStatus.getPermission();
final FileSystem fs = FileSystem.get(yarnConfiguration);
final Path homeDir = fs.getHomeDirectory();
if (fs.getScheme().startsWith("file")) {
  LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
      + "specified Hadoop configuration path is wrong and the system is using the default Hadoop "
      + "configuration values. The Flink YARN client needs to store its files in a distributed file system.");
}
// Upload the configuration and job files to the staging directory. The enclosing helper
// calls are truncated in the source; only their argument fragments remain:
//   fs, appId, new Path(tmpConfigurationFile.getAbsolutePath()), localResources, homeDir, ...
//   fs, appId, new Path(fp.toURI()), localResources, homeDir, ...
FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
fs.setPermission(yarnFilesDir, permission); // restrict the staging files to the submitting user
appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));
appMasterEnv.put(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE, getZookeeperNamespace());
appMasterEnv.put(YarnConfigKeys.FLINK_YARN_FILES, yarnFilesDir.toUri().toString());
LOGGER.info("Starting {}", getClass().getSimpleName());
FileSystem fs = FileSystem.get(getConf());
Path inputPathPattern = new Path(_inputSegmentDir);
Path stagingDir = new Path(_stagingDir);
Path outputDir = new Path(_outputDir);
if (fs.exists(outputDir)) {
  LOGGER.warn("Found the output folder {}, deleting it", _outputDir);
  fs.delete(outputDir, true);
}
fs.mkdirs(outputDir);
// Derive the directory permissions from the configured umask.
FsPermission umask = new FsPermission(_defaultPermissionsMask);
FsPermission permission = FsPermission.getDirDefault().applyUMask(umask);
List<Path> inputDataFiles = new ArrayList<>();
FileStatus[] fileStatusArr = fs.globStatus(inputPathPattern);
for (FileStatus fileStatus : fileStatusArr) {
  inputDataFiles.addAll(getDataFilesFromPath(fs, fileStatus.getPath()));
}
job.getConfiguration().set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
FileOutputFormat.setOutputPath(job, new Path(_stagingDir + "/output/"));
job.getConfiguration().setInt(JobContext.NUM_MAPS, inputDataFiles.size());
if (_dataSchema != null) {
  job.getConfiguration().set(JobConfigConstants.SCHEMA, _dataSchema.toString());
}
@Test
public void testResolveOwnerAndPermission() throws Exception {
  Path path = new Path("/test/path");
  FileStatus fileStatus = new FileStatus(1, false, 0, 0, 0, 0, FsPermission.getDefault(), "owner", "group", path);
  Mockito.doReturn(fileStatus).when(fs).getFileStatus(path);
  Mockito.doReturn(path).when(fs).makeQualified(path);
  Mockito.doReturn(new URI("hdfs://uri")).when(fs).getUri();

  // The call under test is elided in the source excerpt; it is assumed to resolve
  // ownerAndPermission from the mocked FileSystem before the assertions below.

  Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
  Assert.assertEquals(ownerAndPermission.getGroup(), "group");
  Assert.assertEquals(ownerAndPermission.getFsPermission(), FsPermission.getDefault());
}
Path workDir;
try {
  workDir = new Path(client.pwd());
} catch (SftpException e) {
  throw new IOException(e);
}
Path absolute = makeAbsolute(workDir, file); // resolve the target against the remote working directory
String pathName = absolute.getName();
if (!exists(client, absolute)) {
  Path parent = absolute.getParent();
  created = (parent == null || mkdirs(client, parent, FsPermission.getDefault()));
  if (created) {
    String parentDir = parent.toUri().getPath();
    boolean succeeded = true;
    try {
      // (assumed continuation) cd into the parent and create the leaf directory
      client.cd(parentDir);
      client.mkdir(pathName);
    } catch (SftpException e) {
      succeeded = false;
    }
    created = created && succeeded;
  }
}
Configuration conf = new Configuration();
conf.set(key, (String) pair.getValue());
this.fileSystem = FileSystem.get(conf);
Path tmpPath = new Path(tmpLocation);
if (!fileSystem.exists(tmpPath)) {
  fileSystem.mkdirs(tmpPath, perm);
  log.info("Created tmpPath " + tmpPath + " with permissions " + perm + " and umask " + getUmask(conf));
}
if (!fileSystem.getFileStatus(tmpPath).getPermission().equals(perm)) {
  // mkdirs applies the process umask, so the effective permissions can differ from the
  // requested ones; log the mismatch and set them explicitly.
  log.error(String.format("Wrong permission for %s. Expects %s, actual %s", tmpPath, perm,
      fileSystem.getFileStatus(tmpPath).getPermission()));
  fileSystem.setPermission(tmpPath, perm);
}
Path fromLocationPath = new Path(fromLocation);
// The topic-discovery call around the following filter is truncated in the source:
//   new WhiteBlackListPathFilter(whitelist, blacklist, fileSystem.getFileStatus(fromLocationPath).getPath()),
//   sourceSubdir, fileSystem
for (FileStatus topic : topics.keySet()) {
  Path destinationPath = new Path(destLocation + "/" + topics.get(topic).replace(".", "/") + "/" + destSubdir);
  try {
    runCollectorForTopicDir(fileSystem, topicFullName, new Path(topic.getPath(), sourceSubdir), destinationPath);
  } catch (Exception e) {
    // (assumed) log and continue so one bad topic does not abort the whole run
    log.error("Failed collecting topic dir " + topic.getPath(), e);
  }
}
/**
 * Tests that HdfsUtils#setFullFileStatus does not throw an exception when setting
 * permissions recursively and the underlying FsShell call fails.
 */
@Test
public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
  Configuration conf = new Configuration();
  conf.set("dfs.namenode.acls.enabled", "false");
  Path fakeTarget = new Path("fakePath");
  HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
  FileStatus mockSourceStatus = mock(FileStatus.class);
  FsShell mockFsShell = mock(FsShell.class);
  // Octal literal: a plain 777 is decimal and encodes a different bit pattern.
  when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
  when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
  doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
  HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true,
      mockFsShell);
  // Mockito matchers cannot be mixed with raw values inside one array literal, so verify
  // the recursive chmod invocation with a single array matcher.
  verify(mockFsShell).run(any(String[].class));
}
/**
 * Finds the test compressed file <code>filePath</code> in the classpath and reads it as a
 * {@link FileAwareInputStream}.
 */
private FileAwareInputStream getCompressedInputStream(final String filePath, final String newFileName)
    throws Exception {
  UnGzipConverter converter = new UnGzipConverter();
  FileSystem fs = FileSystem.getLocal(new Configuration());
  String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(filePath,
      new Path(testTempPath, newFileName).toString(), newFileName, ownerAndPermission);
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(fs.open(new Path(fullPath))).build();
  Iterable<FileAwareInputStream> iterable =
      converter.convertRecord("outputSchema", fileAwareInputStream, new WorkUnitState());
  return Iterables.getFirst(iterable, null);
}
@Override
public UfsDirectoryStatus getDirectoryStatus(String path) throws IOException {
  Path tPath = new Path(path);
  FileSystem hdfs = getFs();
  FileStatus fs = hdfs.getFileStatus(tPath);
  return new UfsDirectoryStatus(path, fs.getOwner(), fs.getGroup(), fs.getPermission().toShort(),
      fs.getModificationTime());
}
@Test
public void rootReadWriteExecute() throws IOException, LoginException {
  UserGroupInformation ugi = SecurityUtils.getUGI();
  FileSystem fs = FileSystem.get(new Configuration());
  String old = fs.getConf().get("dfs.permissions.supergroup");
  try {
    fs.getConf().set("dfs.permissions.supergroup", ugi.getPrimaryGroupName());
    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE));
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
  } finally {
    fs.getConf().set("dfs.permissions.supergroup", old);
  }
}
private void addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId,
    Map<String, LocalResource> localResources, String resources) throws IOException {
  String suffix = jstormClientContext.appName + JOYConstants.BACKLASH + appId + JOYConstants.BACKLASH + fileDstPath;
  Path dst = new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem.create(fs, dst, new FsPermission(JOYConstants.FS_PERMISSION));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc = LocalResource.newInstance(
      ConverterUtils.getYarnUrlFromURI(dst.toUri()),
      LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
      scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}