/**
 * Returns the user's home directory as reported by the remote file system,
 * caching the result after the first successful lookup. If the remote call
 * fails, falls back to the conventional {@code /user/<shortUserName>} path.
 */
@Override
public Path getHomeDirectory() {
  if (cachedHomeDirectory != null) {
    return cachedHomeDirectory;
  }
  final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
  try {
    // Ask the remote file system for the home directory path.
    final String remotePath = new FsPathResponseRunner<String>(op, null,
        new UserParam(ugi)) {
      @Override
      String decodeResponse(Map<?, ?> json) throws IOException {
        return JsonUtilClient.getPath(json);
      }
    }.run();
    cachedHomeDirectory =
        new Path(remotePath).makeQualified(this.getUri(), null);
  } catch (IOException e) {
    LOG.error("Unable to get HomeDirectory from original File System", e);
    // Best-effort fallback: construct the default home location locally.
    cachedHomeDirectory = new Path("/user/" + ugi.getShortUserName())
        .makeQualified(this.getUri(), null);
  }
  return cachedHomeDirectory;
}
@Override public Path getTrashRoot(Path path) { statistics.incrementReadOps(1); storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT); final HttpOpParam.Op op = GetOpParam.Op.GETTRASHROOT; try { String strTrashPath = new FsPathResponseRunner<String>(op, path) { @Override String decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.getPath(json); } }.run(); return new Path(strTrashPath).makeQualified(getUri(), null); } catch(IOException e) { LOG.warn("Cannot find trash root of " + path, e); // keep the same behavior with dfs return super.getTrashRoot(path).makeQualified(getUri(), null); } }
/**
 * Returns the {@link FileStatus} of the given path, qualified against this
 * file system's URI.
 *
 * @param f the path to stat
 * @return qualified file status for {@code f}
 * @throws IOException if the remote status lookup fails
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
  final HdfsFileStatus status = getHdfsFileStatus(f);
  return status.makeQualified(getUri(), f);
}
/**
 * Get {@link FileStatus} of files/directories in the given path. If path
 * corresponds to a file then {@link FileStatus} of that file is returned.
 * Else if path represents a directory then {@link FileStatus} of all
 * files/directories inside given path is returned.
 *
 * @param f given path
 * @return the statuses of the files/directories in the given path
 */
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
  final URI fsUri = getUri();
  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  return new FsPathResponseRunner<FileStatus[]>(op, f) {
    @Override
    FileStatus[] decodeResponse(Map<?, ?> json) {
      // Qualify every decoded entry against this FS's URI and the listed path.
      final HdfsFileStatus[] raw = JsonUtilClient.toHdfsFileStatusArray(json);
      final FileStatus[] qualified = new FileStatus[raw.length];
      int i = 0;
      for (HdfsFileStatus s : raw) {
        qualified[i++] = s.makeQualified(fsUri, f);
      }
      return qualified;
    }
  }.run();
}
/**
 * Converts an {@link HdfsFileStatus} into a {@link FileStatus} whose path is
 * fully qualified against this file system's URI and working directory.
 */
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  // Symlink target is only meaningful when the entry is a symlink.
  final Path symlink = f.isSymlink() ? new Path(f.getSymlink()) : null;
  final Path qualifiedPath =
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory());
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      symlink, qualifiedPath);
}
@Override public URI getKeyProviderUri() throws IOException { String keyProviderUri = null; try { keyProviderUri = getServerDefaults().getKeyProviderUri(); } catch (UnsupportedOperationException e) { // This means server doesn't support GETSERVERDEFAULTS call. // Do nothing, let keyProviderUri = null. } catch (RemoteException e) { if (e.getClassName() != null && e.getClassName().equals("java.lang.IllegalArgumentException")) { // See HDFS-13100. // This means server doesn't support GETSERVERDEFAULTS call. // Do nothing, let keyProviderUri = null. } else { throw e; } } return HdfsKMSUtil.getKeyProviderUri(ugi, getUri(), keyProviderUri, getConf()); }
// NOTE(review): fragment of a larger method (enclosing definition not visible
// here). It snapshots the FS URI, pulls the HdfsFileStatus entries of this
// partial-listing batch, and allocates a parallel array — presumably filled
// with qualified FileStatus objects just below this view; confirm in caller.
final URI fsUri = getUri(); final HdfsFileStatus[] statuses = listing.getPartialListing(); FileStatus[] qualified = new FileStatus[statuses.length];
/**
 * Builds a {@link FileStatus} from an {@link HdfsFileStatus}, qualifying the
 * full path against this file system's URI and current working directory.
 */
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  final Path link;
  if (f.isSymlink()) {
    link = new Path(f.getSymlink());
  } else {
    // Non-symlink entries carry no link target.
    link = null;
  }
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(), link,
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
/**
 * Expect read timeout, because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testReadTimeout() throws Exception {
  try {
    fs.listFiles(new Path("/"), false);
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // The exception message should identify the authority and the timeout.
    final String expected = fs.getUri().getAuthority() + ": Read timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * Expect connect timeout, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT)
public void testConnectTimeout() throws Exception {
  consumeConnectionBacklog();
  try {
    fs.listFiles(new Path("/"), false);
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority that refused to connect.
    final String expected = fs.getUri().getAuthority() + ": connect timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * Expect read timeout on a URL that requires auth, because the bogus server
 * never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlReadTimeout() throws Exception {
  try {
    fs.getDelegationToken("renewer");
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority whose read timed out.
    final String expected = fs.getUri().getAuthority() + ": Read timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * After a redirect, expect connect timeout accessing the redirect location,
 * because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT)
public void testRedirectConnectTimeout() throws Exception {
  startSingleTemporaryRedirectResponseThread(true);
  try {
    fs.getFileChecksum(new Path("/file"));
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority that could not be connected to.
    final String expected = fs.getUri().getAuthority() + ": connect timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * A webhdfs URI with no explicit port should pick up the custom configured
 * default port, while the canonical service name resolves to that port.
 */
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  final URI uri = URI.create("webhdfs://localhost");
  final WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
/**
 * After a redirect, expect read timeout accessing the redirect location,
 * because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT)
public void testRedirectReadTimeout() throws Exception {
  startSingleTemporaryRedirectResponseThread(false);
  try {
    fs.getFileChecksum(new Path("/file"));
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority whose read timed out.
    final String expected = fs.getUri().getAuthority() + ": Read timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * An explicit port in the webhdfs URI should win over the configured default
 * port for the canonical service name, while getDefaultPort still reports
 * the configured default.
 */
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts()
    throws IOException {
  final URI uri = URI.create("webhdfs://localhost:789");
  final WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
/**
 * Expect connect timeout on a URL that requires auth, because the connection
 * backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlConnectTimeout() throws Exception {
  consumeConnectionBacklog();
  try {
    fs.getDelegationToken("renewer");
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority that refused to connect.
    final String expected = fs.getUri().getAuthority() + ": connect timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  }
}
/**
 * On the second step of two-step write, expect connect timeout accessing the
 * redirect location, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT)
public void testTwoStepWriteConnectTimeout() throws Exception {
  startSingleTemporaryRedirectResponseThread(true);
  OutputStream os = null;
  try {
    os = fs.create(new Path("/file"));
    fail("expected timeout");
  } catch (SocketTimeoutException e) {
    // Message should name the authority of the redirect target.
    final String expected = fs.getUri().getAuthority() + ": connect timed out";
    GenericTestUtils.assertExceptionContains(expected, e);
  } finally {
    // Close the stream if create() got far enough to return one.
    IOUtils.cleanup(LOG, os);
  }
}
// NOTE(review): fragment of a larger test (enclosing method not visible).
// Verifies the resolved path equals <fs-uri>/user/<PROXY_USER> — presumably
// the proxy user's home directory; confirm against the enclosing test.
Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());