/**
 * Tears down the mini-cluster test environment: temp files, the Tez session (if any),
 * the harness setup, the Spark session, MiniMR, cached FileSystems, MiniDFS, and the
 * thread-local Hive client. Each resource is released independently so one failure
 * does not leak the rest where avoidable.
 *
 * @throws Exception if cleanup of the qtest files or cluster teardown fails
 */
public void shutdown() throws Exception {
  // Honor QTEST_LEAVE_FILES so a developer can inspect test output after a run.
  if (System.getenv(QTEST_LEAVE_FILES) == null) {
    cleanUp();
  }
  if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    // Guard against an uninitialized session state (or one already torn down):
    // an NPE here would abort the method and leak every resource below.
    SessionState ss = SessionState.get();
    if (ss != null && ss.getTezSession() != null) {
      ss.getTezSession().destroy();
    }
  }
  setup.tearDown();
  if (sparkSession != null) {
    try {
      SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
    } catch (Exception ex) {
      // Best-effort: log and continue so the remaining resources still get released.
      LOG.error("Error closing spark session.", ex);
    } finally {
      sparkSession = null;
    }
  }
  if (mr != null) {
    mr.shutdown();
    mr = null;
  }
  // Close all cached FileSystem instances before stopping DFS that backs them.
  FileSystem.closeAll();
  if (dfs != null) {
    dfs.shutdown();
    dfs = null;
  }
  // Drop the thread-local Hive metastore client.
  Hive.closeCurrent();
}
public void initConf() throws Exception { String vectorizationEnabled = System.getProperty("test.vectorization.enabled"); if(vectorizationEnabled != null && vectorizationEnabled.equalsIgnoreCase("true")) { conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true); } // Plug verifying metastore in for testing DirectSQL. conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); if (mr != null) { mr.setupConfiguration(conf); // TODO Ideally this should be done independent of whether mr is setup or not. setFsRelatedProperties(conf, fs.getScheme().equals("file"),fs); } conf.set(ConfVars.HIVE_EXECUTION_ENGINE.varname, clusterType.name()); }
public void stop() { verifyStarted(); hiveServer2.stop(); setStarted(false); try { if (mr != null) { mr.shutdown(); mr = null; } if (dfs != null) { dfs.shutdown(); dfs = null; } } catch (IOException e) { // Ignore errors cleaning up miniMR } FileUtils.deleteQuietly(baseDir); }
/**
 * Shuts down the mini Tez cluster created in {@code beforeClass}.
 * Guarded against {@code mr1 == null}: if {@code beforeClass} failed before
 * assigning the cluster, an unconditional shutdown would NPE here and mask
 * the original test failure.
 *
 * @param ctx the test environment context (unused during teardown)
 * @throws Exception if cluster shutdown fails
 */
@Override
public void afterClass(HiveTestEnvContext ctx) throws Exception {
  if (mr1 != null) {
    mr1.shutdown();
    mr1 = null;
  }
} }
/**
 * Creates a local mini Tez cluster via the Hadoop shims and wires its
 * settings into the test's Hive configuration.
 *
 * @param ctx test environment context supplying the HiveConf to configure
 * @throws Exception if the mini cluster cannot be created or configured
 */
@Override
public void beforeClass(HiveTestEnvContext ctx) throws Exception {
  mr1 = ShimLoader.getHadoopShims().getLocalMiniTezCluster(ctx.hiveConf, true);
  mr1.setupConfiguration(ctx.hiveConf);
}