/**
 * Returns the default replication factor for new files, read from the
 * client configuration key {@code dfs.replication}.
 */
@Override
public short getDefaultReplication() {
  final int replication = getConf().getInt(
      HdfsClientConfigKeys.DFS_REPLICATION_KEY,
      HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
  return (short) replication;
}
/**
 * {@inheritDoc}
 *
 * <p>Reads the replication factor from {@code dfs.replication} in the
 * current configuration.
 */
@Override
public short getDefaultReplication() {
  return (short) getConf().getInt(
      DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
}
/**
 * Returns the configured default replication factor for new files.
 */
@Override
public short getDefaultReplication() {
  final int configured = getConf().getInt(
      DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  return (short) configured;
}
/**
 * Returns the default NameNode HTTP port taken from configuration.
 */
@Override
@VisibleForTesting
public int getDefaultPort() {
  final int port = getConf().getInt(
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
  return port;
}
/**
 * Returns the default block size in bytes, read from
 * {@code dfs.blocksize} (size suffixes such as "128m" are accepted by
 * getLongBytes).
 */
@Override
public long getDefaultBlockSize() {
  final long blockSize = getConf().getLongBytes(
      DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  return blockSize;
}
/**
 * {@inheritDoc}
 *
 * <p>Reads {@code dfs.blocksize} from the current configuration.
 */
@Override
public long getDefaultBlockSize() {
  return getConf().getLongBytes(
      DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
}
/**
 * Returns the configured default block size in bytes for new files.
 */
@Override
public long getDefaultBlockSize() {
  final long configured = getConf().getLongBytes(
      HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
      HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  return configured;
}
/**
 * {@inheritDoc}
 *
 * <p>Returns the NameNode HTTP port from configuration.
 */
@Override
@VisibleForTesting
public int getDefaultPort() {
  return getConf().getInt(
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
/**
 * Returns the KeyProvider for this filesystem, or {@code null} when no
 * key-provider URI is available. A test-injected provider, when set,
 * takes precedence over the configured one.
 *
 * @throws IOException if the provider cannot be created
 */
@Override
public KeyProvider getKeyProvider() throws IOException {
  if (testProvider != null) {
    return testProvider;
  }
  final URI providerUri = getKeyProviderUri();
  return providerUri == null
      ? null
      : KMSUtil.createKeyProviderFromUri(getConf(), providerUri);
}
/** * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS * resolver when the URL points to an non-HA cluster. When the URL points to * an HA cluster with its logical name, the resolver further resolves the * logical name(i.e., the authority in the URL) into real namenode addresses. */ private InetSocketAddress[] resolveNNAddr() throws IOException { Configuration conf = getConf(); final String scheme = uri.getScheme(); ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>(); if (!HAUtil.isLogicalUri(conf, uri)) { InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); ret.add(addr); } else { Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil .getHaNnWebHdfsAddresses(conf, scheme); // Extract the entry corresponding to the logical name. Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost()); for (InetSocketAddress addr : addrs.values()) { ret.add(addr); } } InetSocketAddress[] r = new InetSocketAddress[ret.size()]; return ret.toArray(r); }
/** * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS * resolver when the URL points to an non-HA cluster. When the URL points to * an HA cluster with its logical name, the resolver further resolves the * logical name(i.e., the authority in the URL) into real namenode addresses. */ private InetSocketAddress[] resolveNNAddr() throws IOException { Configuration conf = getConf(); final String scheme = uri.getScheme(); ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>(); if (!HAUtil.isLogicalUri(conf, uri)) { InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); ret.add(addr); } else { Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil .getHaNnWebHdfsAddresses(conf, scheme); // Extract the entry corresponding to the logical name. Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost()); for (InetSocketAddress addr : addrs.values()) { ret.add(addr); } } InetSocketAddress[] r = new InetSocketAddress[ret.size()]; return ret.toArray(r); }
/** * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS * resolver when the URL points to an non-HA cluster. When the URL points to * an HA cluster with its logical name, the resolver further resolves the * logical name(i.e., the authority in the URL) into real namenode addresses. */ private InetSocketAddress[] resolveNNAddr() { Configuration conf = getConf(); final String scheme = uri.getScheme(); ArrayList<InetSocketAddress> ret = new ArrayList<>(); if (!HAUtilClient.isLogicalUri(conf, uri)) { InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); ret.add(addr); } else { Map<String, Map<String, InetSocketAddress>> addresses = DFSUtilClient .getHaNnWebHdfsAddresses(conf, scheme); // Extract the entry corresponding to the logical name. Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost()); for (InetSocketAddress addr : addrs.values()) { ret.add(addr); } } InetSocketAddress[] r = new InetSocketAddress[ret.size()]; return ret.toArray(r); }
/**
 * Applies the configured umask to the given permission, substituting the
 * filesystem default permission when {@code permission} is null.
 *
 * @param permission requested permission, may be null
 * @return the permission with the umask applied
 */
private FsPermission applyUMask(FsPermission permission) {
  final FsPermission base =
      (permission == null) ? FsPermission.getDefault() : permission;
  return base.applyUMask(FsPermission.getUMask(getConf()));
}
/**
 * Masks the supplied permission with the configured umask; a null
 * permission means "use the filesystem default".
 *
 * @param permission requested permission, may be null
 * @return the umask-adjusted permission
 */
private FsPermission applyUMask(FsPermission permission) {
  final FsPermission umask = FsPermission.getUMask(getConf());
  if (permission == null) {
    return FsPermission.getDefault().applyUMask(umask);
  }
  return permission.applyUMask(umask);
}
/**
 * Applies the configured umask to the given permission via
 * {@code FsCreateModes}, defaulting a null permission to the filesystem
 * default.
 *
 * @param permission requested permission, may be null
 * @return the umask-adjusted creation modes
 */
private FsPermission applyUMask(FsPermission permission) {
  final FsPermission base =
      (permission == null) ? FsPermission.getDefault() : permission;
  return FsCreateModes.applyUMask(base, FsPermission.getUMask(getConf()));
}
/**
 * Wraps this stream with encryption-aware decoration, using the file's
 * encryption info and the configured key provider.
 *
 * @return the (possibly decrypting) wrapped input stream
 * @throws IOException if the wrapped stream cannot be created
 */
InputStream createWrappedInputStream() throws IOException {
  final KeyProvider provider = getKeyProvider();
  return HdfsKMSUtil.createWrappedInputStream(
      this, provider, getFileEncryptionInfo(), getConf());
}
}
@Override public URI getKeyProviderUri() throws IOException { String keyProviderUri = null; try { keyProviderUri = getServerDefaults().getKeyProviderUri(); } catch (UnsupportedOperationException e) { // This means server doesn't support GETSERVERDEFAULTS call. // Do nothing, let keyProviderUri = null. } catch (RemoteException e) { if (e.getClassName() != null && e.getClassName().equals("java.lang.IllegalArgumentException")) { // See HDFS-13100. // This means server doesn't support GETSERVERDEFAULTS call. // Do nothing, let keyProviderUri = null. } else { throw e; } } return HdfsKMSUtil.getKeyProviderUri(ugi, getUri(), keyProviderUri, getConf()); }