@Override
public void setupConfiguration(Configuration conf) {
  // Copy every property from the running MiniMR cluster into the supplied conf.
  Configuration config = mr.getConfig();
  for (Map.Entry<String, String> pair : config) {
    conf.set(pair.getKey(), pair.getValue());
  }

  Path jarPath = new Path("hdfs:///user/hive");
  Path hdfsPath = new Path("hdfs:///user/");
  try {
    FileSystem fs = cluster.getFileSystem();

    // Point Hive at an HDFS directory for its exec jar, and create it.
    jarPath = fs.makeQualified(jarPath);
    conf.set("hive.jar.directory", jarPath.toString());
    fs.mkdirs(jarPath);

    // Likewise for the per-user install directory.
    hdfsPath = fs.makeQualified(hdfsPath);
    conf.set("hive.user.install.directory", hdfsPath.toString());
    fs.mkdirs(hdfsPath);
  } catch (Exception e) {
    // Best-effort setup: failures creating the directories are printed, not rethrown.
    e.printStackTrace();
  }
}
} // closes the enclosing shim class (remainder not shown in this excerpt)
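// A minimal usage sketch (not from the original source): a caller passes a fresh
// configuration into the hook above before starting services. The mrShim field
// name and the use of HiveConf here are illustrative assumptions; HiveConf
// extends Configuration, so it satisfies the method signature.
HiveConf hiveConf = new HiveConf();
mrShim.setupConfiguration(hiveConf);
// hiveConf now carries the MiniMR cluster properties plus the two HDFS staging
// directories ("hive.jar.directory" and "hive.user.install.directory").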
public void stop() {
  verifyStarted();
  hiveServer2.stop();
  setStarted(false);
  try {
    if (mr != null) {
      mr.shutdown();
      mr = null;
    }
    if (dfs != null) {
      dfs.shutdown();
      dfs = null;
    }
  } catch (IOException e) {
    // Ignore errors cleaning up miniMR
  }
  FileUtils.deleteQuietly(baseDir);
}
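// verifyStarted() and setStarted() are referenced above but not part of this
// excerpt. A plausible minimal sketch, assuming a simple boolean lifecycle flag:
private boolean started;

private void verifyStarted() {
  if (!started) {
    throw new IllegalStateException("Service is not started");
  }
}

private void setStarted(boolean started) {
  this.started = started;
}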
public void shutdown() throws Exception {
  // Leave test artifacts in place when the env var is set, for debugging.
  if (System.getenv(QTEST_LEAVE_FILES) == null) {
    cleanUp();
  }

  if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    SessionState.get().getTezSession().destroy();
  }

  setup.tearDown();

  if (sparkSession != null) {
    try {
      SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
    } catch (Exception ex) {
      LOG.error("Error closing spark session.", ex);
    } finally {
      sparkSession = null;
    }
  }

  if (mr != null) {
    mr.shutdown();
    mr = null;
  }

  // Close all cached FileSystem instances before stopping the mini DFS.
  FileSystem.closeAll();

  if (dfs != null) {
    dfs.shutdown();
    dfs = null;
  }

  Hive.closeCurrent();
}
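// Hypothetical caller (not in the original listing): a test class would
// typically invoke shutdown() once from an @AfterClass hook so the mini
// clusters are released even when individual tests fail. The qt field name
// is an assumption.
@AfterClass
public static void tearDownAfterClass() throws Exception {
  if (qt != null) {
    qt.shutdown();
  }
}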
private void setupFileSystem(HadoopShims shims) throws IOException {
  if (fsType == FsType.local) {
    fs = FileSystem.getLocal(conf);
  } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
    int numDataNodes = 4;

    if (fsType == FsType.encrypted_hdfs) {
      // Set the security key provider so that the MiniDFS cluster is initialized
      // with encryption
      conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
      conf.setInt("fs.trash.interval", 50);

      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();

      // set up the java key provider for encrypted hdfs cluster
      hes = shims.createHdfsEncryptionShim(fs, conf);
      LOG.info("key provider is initialized");
    } else {
      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();
    }
  } else {
    throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
  }
}
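// getKeyProviderURI() is referenced above but not shown. A minimal sketch,
// assuming a file-backed JavaKeyStore (jceks) provider; the keyStoreDir field
// and file name are illustrative assumptions, not taken from the source:
private String getKeyProviderURI() {
  // "jceks://file" + an absolute path points the MiniDFS cluster's key
  // provider at a local keystore file created for the test run.
  return "jceks://file" + new Path(keyStoreDir, "test.jks").toUri();
}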