/**
 * Returns the HDFS encryption shim for the given filesystem, creating and caching it
 * (keyed by filesystem URI) on first use. Only the "hdfs" scheme is supported; for any
 * other scheme nothing is cached and {@code null} is returned (and an info message is
 * logged on every call).
 *
 * @param fs filesystem whose URI keys the shim cache
 * @return the encryption shim for {@code fs}, or {@code null} if {@code fs} is not HDFS
 * @throws HiveException if creating the shim fails
 */
public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim(FileSystem fs) throws HiveException {
  final java.net.URI fsUri = fs.getUri();
  if (hdfsEncryptionShims.containsKey(fsUri)) {
    // Cache hit: reuse the shim created earlier for this URI.
    return hdfsEncryptionShims.get(fsUri);
  }
  try {
    if ("hdfs".equals(fsUri.getScheme())) {
      hdfsEncryptionShims.put(fsUri, ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, sessionConf));
    } else {
      LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
    }
  } catch (Exception e) {
    // Wrap anything thrown by shim creation in the caller-facing exception type.
    throw new HiveException(e);
  }
  // Non-HDFS schemes were never cached, so this yields null for them.
  return hdfsEncryptionShims.get(fsUri);
}
/**
 * Lazily resolves the HDFS encryption shim for {@code fs}, caching it per filesystem URI.
 * Non-HDFS schemes are never cached: each such call logs an info message and the method
 * returns {@code null}.
 *
 * @param fs filesystem whose URI keys the shim cache
 * @return the cached or newly created shim, or {@code null} for non-HDFS filesystems
 * @throws HiveException if the shim cannot be created
 */
public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim(FileSystem fs) throws HiveException {
  if (!hdfsEncryptionShims.containsKey(fs.getUri())) {
    // Scheme check stays inside the try so any unexpected failure is wrapped uniformly.
    try {
      boolean isHdfsScheme = "hdfs".equals(fs.getUri().getScheme());
      if (!isHdfsScheme) {
        LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
      } else {
        HadoopShims.HdfsEncryptionShim shim =
            ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, sessionConf);
        hdfsEncryptionShims.put(fs.getUri(), shim);
      }
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
  return hdfsEncryptionShims.get(fs.getUri());
}
runOptions.shouldModifyManagedTableLocation = false; } else { encryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(oldWhRootFs, conf); if (!hasEquivalentEncryption(encryptionShim, oldWhRootPath, curWhRootPath)) { LOG.info("oldWarehouseRoot {} and current warehouse root {} have different encryption zones." +
= ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf); if (!shim.isPathEncrypted(location)) { HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
FileSystem fs = location.getFileSystem(conf); HadoopShims.HdfsEncryptionShim shim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf); if (!shim.isPathEncrypted(location)) { HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
private void setupFileSystem(HadoopShims shims) throws IOException { if (fsType == FsType.local) { fs = FileSystem.getLocal(conf); } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) { int numDataNodes = 4; if (fsType == FsType.encrypted_hdfs) { // Set the security key provider so that the MiniDFS cluster is initialized // with encryption conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI()); conf.setInt("fs.trash.interval", 50); dfs = shims.getMiniDfs(conf, numDataNodes, true, null); fs = dfs.getFileSystem(); // set up the java key provider for encrypted hdfs cluster hes = shims.createHdfsEncryptionShim(fs, conf); LOG.info("key provider is initialized"); } else { dfs = shims.getMiniDfs(conf, numDataNodes, true, null); fs = dfs.getFileSystem(); } } else { throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]"); } }
/**
 * Lazily creates and memoizes the encryption shim for the default filesystem of
 * {@code conf}. If the default filesystem is not HDFS, the field stays {@code null},
 * an info message is logged, and the creation attempt is repeated on each call.
 *
 * @return the memoized shim, or {@code null} when the default filesystem is not HDFS
 * @throws HiveException if resolving the filesystem or creating the shim fails
 */
public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim() throws HiveException {
  if (hdfsEncryptionShim != null) {
    // Already initialized on a previous call.
    return hdfsEncryptionShim;
  }
  try {
    FileSystem fs = FileSystem.get(conf);
    boolean hdfsScheme = "hdfs".equals(fs.getUri().getScheme());
    if (hdfsScheme) {
      hdfsEncryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
    } else {
      LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
    }
  } catch (Exception e) {
    throw new HiveException(e);
  }
  return hdfsEncryptionShim;
}
try { HadoopShims.HdfsEncryptionShim shim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(FileSystem.get(hiveConf), hiveConf); if (shim.isPathEncrypted(pathToData)) { throw new MetaException("Unable to drop " + objectName + " because it is in an encryption zone" +
try { HadoopShims.HdfsEncryptionShim shim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(FileSystem.get(hiveConf), hiveConf); if (shim.isPathEncrypted(pathToData)) { throw new MetaException("Unable to drop " + objectName + " because it is in an encryption zone" +