/**
 * Builds an offset-ordered index of a file's block locations.
 *
 * @param fs     filesystem holding the file
 * @param status status of the file whose blocks are indexed
 * @return a {@link TreeMap} keyed by each block's starting offset, so callers
 *         can do range lookups (e.g. {@code floorEntry}) over the blocks
 * @throws IOException if the underlying location lookup fails
 */
@Override
public TreeMap<Long, BlockLocation> getLocationsWithOffset(FileSystem fs,
    FileStatus status) throws IOException {
  // Diamond operator replaces the verbose pre-Java-7 instantiation; the
  // temporary array variable is folded into the loop header.
  TreeMap<Long, BlockLocation> offsetBlockMap = new TreeMap<>();
  for (BlockLocation location : getLocations(fs, status)) {
    offsetBlockMap.put(location.getOffset(), location);
  }
  return offsetBlockMap;
}
/**
 * Load the fair scheduler queue for given user if available.
 */
@Override
public void refreshDefaultQueue(Configuration conf, String userName)
    throws IOException {
  // Nothing to do unless a user was supplied and the fair scheduler is the
  // active scheduler for this configuration.
  if (StringUtils.isBlank(userName) || !isFairScheduler(conf)) {
    return;
  }
  ShimLoader.getSchedulerShims().refreshDefaultQueue(conf, userName);
}
/**
 * Creates an encryption shim for the given filesystem: a real
 * {@link HdfsEncryptionShim} only when HDFS encryption is supported and the
 * filesystem's scheme is actually "hdfs"; otherwise a no-op shim.
 */
@Override
public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs,
    Configuration conf) throws IOException {
  if (!isHdfsEncryptionSupported()) {
    return new HadoopShims.NoopHdfsEncryptionShim();
  }
  URI uri = fs.getUri();
  return "hdfs".equals(uri.getScheme())
      ? new HdfsEncryptionShim(uri, conf)
      : new HadoopShims.NoopHdfsEncryptionShim();
}
// NOTE(review): fragment of a permission/ACL propagation routine — the
// enclosing method signature and closing braces are outside this view, and
// `group`, `aclEntry`, `permission`, `sourcePerm`, `target`, `fs` and
// `aclStatus` are presumably parameters/locals declared before this point —
// TODO confirm against the full method. What is visible: an FsShell is
// configured and used to recursively chgrp the target; when extended ACLs
// are enabled and a source AclStatus exists, the base user/group/other
// entries are rebuilt from sourcePerm's actions and re-applied via
// "-setfacl -R --set"; a recursive "-chmod" also runs (branch nesting is
// ambiguous in this fragment), and the resulting full file status is
// debug-logged.
FsShell fsShell = new FsShell(); fsShell.setConf(conf); run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); if (isExtendedAclEnabled(conf)) { if (aclStatus != null) { List<AclEntry> aclEntries = aclStatus.getEntries(); removeBaseAclEntries(aclEntries); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); getFullFileStatus(conf, fs, target).debugLog();
@Override public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action) throws IOException, AccessControlException, Exception { try { if (accessMethod == null) { // Have to rely on Hive implementation of filesystem permission checks. DefaultFileAccess.checkFileAccess(fs, stat, action); } else { accessMethod.invoke(fs, stat.getPath(), action); } } catch (Exception err) { throw wrapAccessException(err); } }
// NOTE(review): fragment of an HA-aware getMiniDfs overload — the start of
// the parameter list, the initialization of `miniDFSCluster`, the
// declaration of `keyProvider`, and the closing braces are all outside this
// view, so the control flow here is incomplete. What is visible: when a
// KeyProvider exists it is pushed onto the DFS client of filesystem index 0
// (presumably so JKS key updates flush properly, matching the non-HA
// overload — TODO confirm), and any failure is rethrown wrapped in an
// IOException.
String[] racks, boolean isHA) throws IOException { configureImpersonation(conf); MiniDFSCluster miniDFSCluster; if (isHA) { if (keyProvider != null) { try { setKeyProvider(miniDFSCluster.getFileSystem(0).getClient(), keyProvider); } catch (Exception err) { throw new IOException(err);
@Override public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException { configureImpersonation(conf); MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks); // Need to set the client's KeyProvider to the NN's for JKS, // else the updates do not get flushed properly KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode().getNamesystem().getProvider(); if (keyProvider != null) { miniDFSCluster.getFileSystem().getClient().setKeyProvider(keyProvider); } cluster = new MiniDFSShim(miniDFSCluster); return cluster; }
@Override public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { FileStatus fileStatus = fs.getFileStatus(file); AclStatus aclStatus = null; if (isExtendedAclEnabled(conf)) { //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. try { aclStatus = fs.getAclStatus(file); } catch (Exception e) { LOG.info("Skipping ACL inheritance: File system for path " + file + " " + "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e); } } return new Hadoop23FileStatus(fileStatus, aclStatus); }
// NOTE(review): fragment of a permission/ACL propagation routine — the
// enclosing method signature and closing braces are outside this view, and
// `group`, `aclEntry`, `permission`, `sourcePerm`, `target`, `fs` and
// `aclStatus` are presumably parameters/locals declared before this point —
// TODO confirm against the full method. What is visible: an FsShell is
// configured and used to recursively chgrp the target; when extended ACLs
// are enabled and a source AclStatus exists, the base user/group/other
// entries are rebuilt from sourcePerm's actions and re-applied via
// "-setfacl -R --set"; a recursive "-chmod" also runs (branch nesting is
// ambiguous in this fragment), and the resulting full file status is
// debug-logged.
FsShell fsShell = new FsShell(); fsShell.setConf(conf); run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); if (isExtendedAclEnabled(conf)) { if (aclStatus != null) { List<AclEntry> aclEntries = aclStatus.getEntries(); removeBaseAclEntries(aclEntries); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); getFullFileStatus(conf, fs, target).debugLog();
@Override public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action) throws IOException, AccessControlException, Exception { try { if (accessMethod == null) { // Have to rely on Hive implementation of filesystem permission checks. DefaultFileAccess.checkFileAccess(fs, stat, action); } else { accessMethod.invoke(fs, stat.getPath(), action); } } catch (Exception err) { throw wrapAccessException(err); } }
// NOTE(review): fragment of an HA-aware getMiniDfs overload — the start of
// the parameter list, the initialization of `miniDFSCluster`, the
// declaration of `keyProvider`, and the closing braces are all outside this
// view, so the control flow here is incomplete. What is visible: when a
// KeyProvider exists it is pushed onto the DFS client of filesystem index 0
// (presumably so JKS key updates flush properly, matching the non-HA
// overload — TODO confirm), and any failure is rethrown wrapped in an
// IOException.
String[] racks, boolean isHA) throws IOException { configureImpersonation(conf); MiniDFSCluster miniDFSCluster; if (isHA) { if (keyProvider != null) { try { setKeyProvider(miniDFSCluster.getFileSystem(0).getClient(), keyProvider); } catch (Exception err) { throw new IOException(err);
@Override public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException { configureImpersonation(conf); MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks); // Need to set the client's KeyProvider to the NN's for JKS, // else the updates do not get flushed properly KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode().getNamesystem().getProvider(); if (keyProvider != null) { miniDFSCluster.getFileSystem().getClient().setKeyProvider(keyProvider); } cluster = new MiniDFSShim(miniDFSCluster); return cluster; }
@Override public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { FileStatus fileStatus = fs.getFileStatus(file); AclStatus aclStatus = null; if (isExtendedAclEnabled(conf)) { //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. try { aclStatus = fs.getAclStatus(file); } catch (Exception e) { LOG.info("Skipping ACL inheritance: File system for path " + file + " " + "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e); } } return new Hadoop23FileStatus(fileStatus, aclStatus); }
// NOTE(review): fragment of a permission/ACL propagation routine — the
// enclosing method signature and closing braces are outside this view, and
// `group`, `aclEntry`, `permission`, `sourcePerm`, `target`, `fs` and
// `aclStatus` are presumably parameters/locals declared before this point —
// TODO confirm against the full method. What is visible: an FsShell is
// configured and used to recursively chgrp the target; when extended ACLs
// are enabled and a source AclStatus exists, the base user/group/other
// entries are rebuilt from sourcePerm's actions and re-applied via
// "-setfacl -R --set"; a recursive "-chmod" also runs (branch nesting is
// ambiguous in this fragment), and the resulting full file status is
// debug-logged.
FsShell fsShell = new FsShell(); fsShell.setConf(conf); run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); if (isExtendedAclEnabled(conf)) { if (aclStatus != null) { List<AclEntry> aclEntries = aclStatus.getEntries(); removeBaseAclEntries(aclEntries); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); getFullFileStatus(conf, fs, target).debugLog();
/**
 * Returns an HDFS encryption shim for {@code fs}. A functional shim is
 * produced only when encryption is supported and the URI scheme is "hdfs";
 * every other case gets the no-op implementation.
 */
@Override
public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs,
    Configuration conf) throws IOException {
  if (isHdfsEncryptionSupported()) {
    URI fsUri = fs.getUri();
    if ("hdfs".equals(fsUri.getScheme())) {
      return new HdfsEncryptionShim(fsUri, conf);
    }
  }
  return new HadoopShims.NoopHdfsEncryptionShim();
}
/**
 * Load the fair scheduler queue for given user if available.
 */
@Override
public void refreshDefaultQueue(Configuration conf, String userName)
    throws IOException {
  // Delegate to the scheduler shim, but only for a non-blank user under the
  // fair scheduler; otherwise this is a no-op.
  boolean applicable = StringUtils.isNotBlank(userName) && isFairScheduler(conf);
  if (applicable) {
    ShimLoader.getSchedulerShims().refreshDefaultQueue(conf, userName);
  }
}
/**
 * Indexes the file's block locations by their starting offsets, returning a
 * sorted map suitable for offset range queries.
 */
@Override
public TreeMap<Long, BlockLocation> getLocationsWithOffset(FileSystem fs,
    FileStatus status) throws IOException {
  BlockLocation[] blocks = getLocations(fs, status);
  TreeMap<Long, BlockLocation> byOffset = new TreeMap<Long, BlockLocation>();
  for (int i = 0; i < blocks.length; i++) {
    byOffset.put(blocks[i].getOffset(), blocks[i]);
  }
  return byOffset;
}
@Override public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action) throws IOException, AccessControlException, Exception { try { if (accessMethod == null) { // Have to rely on Hive implementation of filesystem permission checks. DefaultFileAccess.checkFileAccess(fs, stat, action); } else { accessMethod.invoke(fs, stat.getPath(), action); } } catch (Exception err) { throw wrapAccessException(err); } }
@Override public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException { configureImpersonation(conf); MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks); // Need to set the client's KeyProvider to the NN's for JKS, // else the updates do not get flushed properly KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode().getNamesystem().getProvider(); if (keyProvider != null) { miniDFSCluster.getFileSystem().getClient().setKeyProvider(keyProvider); } cluster = new MiniDFSShim(miniDFSCluster); return cluster; }
@Override public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { FileStatus fileStatus = fs.getFileStatus(file); AclStatus aclStatus = null; if (isExtendedAclEnabled(conf)) { //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. try { aclStatus = fs.getAclStatus(file); } catch (Exception e) { LOG.info("Skipping ACL inheritance: File system for path " + file + " " + "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e); } } return new Hadoop23FileStatus(fileStatus, aclStatus); }