/**
 * Expects an IOException when a non-privileged user tries to create the
 * backup path under a parent directory whose permissions are 000.
 */
@Test(expected = IOException.class)
public void testBackupPathIsNotAccessible() throws Exception {
  Path backupPath = new Path(PERMISSION_TEST_PATH);
  Path parent = backupPath.getParent();

  // As the current (privileged) user: create the parent, then strip all
  // of its permission bits so nobody else can traverse into it.
  FileSystem rootFs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration());
  rootFs.mkdirs(parent);
  rootFs.setPermission(parent, FsPermission.createImmutable((short) 000));

  // DIANA has no access to the locked-down parent, so this mkdirs must throw.
  FileSystem dianaFs =
      DFSTestUtil.getFileSystemAs(DIANA, TEST_UTIL.getConnection().getConfiguration());
  dianaFs.mkdirs(backupPath);
}
}
/**
 * Re-logs in as the given user: closes the previously opened FileSystem
 * handle (if any) before replacing it with one owned by {@code ugi}.
 */
private void login(UserGroupInformation ugi) throws IOException, InterruptedException {
  final FileSystem previous = fs;
  if (previous != null) {
    previous.close();
  }
  fs = DFSTestUtil.getFileSystemAs(ugi, conf);
}
/**
 * Creates a FileSystem for a specific user.
 *
 * @param user UserGroupInformation specific user
 * @return FileSystem for specific user
 * @throws Exception if creation fails
 */
protected FileSystem createFileSystem(UserGroupInformation user) throws Exception {
  // Delegate to the test utility so the handle is created under user's UGI.
  final FileSystem userFs = DFSTestUtil.getFileSystemAs(user, conf);
  return userFs;
}
/**
 * Returns the reference to a new instance of FileSystem created
 * with a different user name: the current short user name suffixed with
 * {@code "_XXX"}, placed in group {@code supergroup}.
 *
 * @param conf current Configuration
 * @return FileSystem instance bound to the synthetic test user
 * @throws IOException if the file system cannot be obtained
 * @throws InterruptedException if the privileged action is interrupted
 */
public static FileSystem createHdfsWithDifferentUsername(final Configuration conf)
    throws IOException, InterruptedException {
  final String username =
      UserGroupInformation.getCurrentUser().getShortUserName() + "_XXX";
  final UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(username, new String[] {"supergroup"});
  return DFSTestUtil.getFileSystemAs(ugi, conf);
}
/**
 * Switches this test's FileSystem handle to one owned by {@code ugi}.
 * The old handle is closed first so file-system resources are not leaked
 * across successive logins.
 */
private void login(UserGroupInformation ugi) throws IOException, InterruptedException {
  if (fs != null) {
    // Release the handle from the previous login before replacing it.
    fs.close();
  }
  fs = DFSTestUtil.getFileSystemAs(ugi, conf);
}
/** * Ensure that even if a file is in a directory with the sticky bit on, * another user can write to that file (assuming correct permissions). */ private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException, InterruptedException { // Create a tmp directory with wide-open permissions and sticky bit Path p = new Path(baseDir, "tmp"); hdfs.mkdirs(p); hdfs.setPermission(p, new FsPermission((short) 01777)); // Write a file to the new tmp directory as a regular user hdfs = DFSTestUtil.getFileSystemAs(user1, conf); Path file = new Path(p, "foo"); writeFile(hdfs, file); hdfs.setPermission(file, new FsPermission((short) 0777)); // Log onto cluster as another user and attempt to append to file hdfs = DFSTestUtil.getFileSystemAs(user2, conf); Path file2 = new Path(p, "foo"); FSDataOutputStream h = hdfs.append(file2); h.write("Some more data".getBytes()); h.close(); }
/** * Test that one user can't delete another user's file when the sticky bit is * set. */ private void confirmDeletingFiles(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException, InterruptedException { Path p = new Path(baseDir, "contemporary"); hdfs.mkdirs(p); hdfs.setPermission(p, new FsPermission((short) 01777)); // Write a file to the new temp directory as a regular user hdfs = DFSTestUtil.getFileSystemAs(user1, conf); Path file = new Path(p, "foo"); writeFile(hdfs, file); // Make sure the correct user is the owner assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner()); // Log onto cluster as another user and attempt to delete the file FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf); try { hdfs2.delete(file, false); fail("Shouldn't be able to delete someone else's file with SB on"); } catch (IOException ioe) { assertTrue(ioe instanceof AccessControlException); assertTrue(ioe.getMessage().contains("sticky bit")); } }
/**
 * Creates a FileSystem for a specific user.
 *
 * @param user UserGroupInformation specific user
 * @return FileSystem for specific user
 * @throws Exception if creation fails
 */
protected FileSystem createFileSystem(UserGroupInformation user) throws Exception {
  // Use the configuration of NameNode 0 in the mini cluster.
  final Configuration nnConf = cluster.getConfiguration(0);
  return DFSTestUtil.getFileSystemAs(user, nnConf);
}
/**
 * Returns the reference to a new instance of FileSystem created
 * with a different user name ("&lt;current user&gt;_XXX" in group supergroup).
 *
 * @param conf current Configuration
 * @return FileSystem instance for the synthetic user
 * @throws IOException if the file system cannot be obtained
 * @throws InterruptedException if the doAs action is interrupted
 */
public static FileSystem createHdfsWithDifferentUsername(final Configuration conf)
    throws IOException, InterruptedException {
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      UserGroupInformation.getCurrentUser().getShortUserName() + "_XXX",
      new String[] {"supergroup"});
  return DFSTestUtil.getFileSystemAs(ugi, conf);
}
static void checkFile(Path p, int expectedsize, final Configuration conf ) throws IOException, InterruptedException { //open the file with another user account final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_" + ++userCount; UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] {"supergroup"}); final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf); final DFSDataInputStream in = (DFSDataInputStream)fs.open(p); //Check visible length Assert.assertTrue(in.getVisibleLength() >= expectedsize); //Able to read? for(int i = 0; i < expectedsize; i++) { Assert.assertEquals((byte)i, (byte)in.read()); } in.close(); }
/**
 * Creates a FileSystem handle for a synthetic test user, either over
 * WebHDFS or over the native DFS client depending on {@code isWebHDFS}.
 *
 * @param conf current Configuration
 * @param isWebHDFS true to return a WebHDFS-backed FileSystem
 */
private static FileSystem createFsWithDifferentUsername(
    final Configuration conf, final boolean isWebHDFS)
    throws IOException, InterruptedException {
  final String username =
      UserGroupInformation.getCurrentUser().getShortUserName() + "_XXX";
  final UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(username, new String[] {"supergroup"});
  if (isWebHDFS) {
    return WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
  }
  return DFSTestUtil.getFileSystemAs(ugi, conf);
}
// NOTE(review): fragment — acquires per-user FileSystem handles as user1 and
// user2; presumably the enclosing test uses and closes them later. TODO
// confirm against the full file before renaming or restructuring.
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf); FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
/** test that allowed operation puts proper entry in audit log */
@Test
public void testAuditAllowed() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();

  // Fix: try-with-resources so the stream is closed even if read() throws;
  // previously a read failure leaked the open input stream.
  final int val;
  try (InputStream istream = userfs.open(file)) {
    val = istream.read();
  }

  verifyAuditLogs(true);
  // read() returns -1 only at EOF; a fresh non-empty file must yield >= 0.
  assertTrue("failed to read from file", val >= 0);
}
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  final FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  final FileStatus status = userfs.getFileStatus(file);
  verifyAuditLogs(true);

  // A successful stat returns a non-null status describing a regular file.
  assertTrue("failed to stat file", status != null && status.isFile());
}
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  final FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  // Lock the file down (owner-only, owned by root) so the test user is
  // guaranteed to be rejected on open.
  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    userfs.open(file);
    fail("open must not succeed");
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogs(false);
}
/**
 * Initialize the cluster, wait for it to become active, and get FileSystem
 * instances for our test users.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @throws Exception if any step fails
 */
private static void initCluster(boolean format) throws Exception {
  final MiniDFSCluster.Builder builder =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format);
  cluster = builder.build();
  cluster.waitActive();

  hdfs = cluster.getFileSystem();
  // Per-user handles used by the individual test cases.
  fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
  fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
static void checkFile(Path p, int expectedsize, final Configuration conf ) throws IOException, InterruptedException { //open the file with another user account final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_" + ++userCount; UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] {"supergroup"}); final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf); final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p); //Check visible length Assert.assertTrue(in.getVisibleLength() >= expectedsize); //Able to read? for(int i = 0; i < expectedsize; i++) { Assert.assertEquals((byte)i, (byte)in.read()); } in.close(); }
/**
 * Starts a 4-DataNode mini cluster and caches DistributedFileSystem
 * handles for the default user and both test users.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @throws Exception if any step fails
 */
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
      .build();
  // Consistency fix: wait for the cluster to come fully up before handing
  // out FileSystem instances, matching the sibling initCluster that calls
  // waitActive() right after build().
  cluster.waitActive();

  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
// NOTE(review): fragment cut at both edges — the createUserForTesting(...)
// calls that begin these argument lists are outside this view. Sets up three
// DistributedFileSystem handles (user1/group1, user2/group2, superuser) plus
// test paths; presumably part of a snapshot/permission test. Do not
// restructure without the full file.
"user1", new String[] { "group1" }); DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil .getFileSystemAs(ugi1, conf); Path dir1_user1 = new Path("/dir1_user1"); Path dir2_user1 = new Path("/dir2_user1"); "user2", new String[] { "group2" }); DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil .getFileSystemAs(ugi2, conf); Path dir_user2 = new Path("/dir_user2"); Path subdir_user2 = new Path(dir_user2, "subdir"); "superuser", new String[] { supergroup }); DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil .getFileSystemAs(superUgi, conf);
// NOTE(review): fragment cut at both edges — begins mid-chain (presumably
// UserGroupInformation.createRemoteUser) and ends mid-call to addCachePool.
// Creates a remote-user FileSystem and starts registering cache pool
// "poolparty"; leave untouched until the full statement is visible.
.createRemoteUser("myuser"); final DistributedFileSystem myDfs = (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf); final String poolName = "poolparty"; dfs.addCachePool(new CachePoolInfo(poolName)