/**
 * Creates a stats updater for the given compaction.
 * Column statistics are only recomputed for major compactions that supply a
 * non-empty column list; otherwise the column list is left empty and stats
 * gathering is effectively disabled.
 */
private StatsUpdater(CompactionInfo ci, List<String> columnListForStats, HiveConf conf, String userName) {
  // Work on a private copy and drop the txn list so that Driver doesn't
  // think it's already in a transaction.
  this.conf = new HiveConf(conf);
  this.conf.unset(ValidTxnList.VALID_TXNS_KEY);
  this.userName = userName;
  this.ci = ci;
  if (ci.isMajorCompaction() && columnListForStats != null && !columnListForStats.isEmpty()) {
    columnList = columnListForStats;
  } else {
    columnList = Collections.emptyList();
  }
}
public CommandProcessorResponse compileAndRespond(String command, boolean cleanupTxnList) { try { compileInternal(command, false); return createProcessorResponse(0); } catch (CommandProcessorResponse e) { return e; } finally { if (cleanupTxnList) { // Valid txn list might be generated for a query compiled using this // command, thus we need to reset it conf.unset(ValidTxnList.VALID_TXNS_KEY); } } }
conf.unset(TezConfiguration.TEZ_QUEUE_NAME); return session; } finally {
conf.unset(TezConfiguration.TEZ_QUEUE_NAME); return session; } finally {
/** When the LDAP group-filter property is unset, the factory must produce no filter. */
@Test
public void testGetInstanceWhenGroupFilterIsEmpty() {
  conf.unset(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER.varname);
  assertNull(factory.getInstance(conf));
}
queueName = null; hasQueue = false; conf.unset(TezConfiguration.TEZ_QUEUE_NAME);
driverConf.unset(ValidTxnList.VALID_TXNS_KEY); //so Driver doesn't get confused
queueName = null; hasQueue = false; conf.unset(TezConfiguration.TEZ_QUEUE_NAME);
unset(key);
private synchronized void refreshLocalResources(SparkWork sparkWork, HiveConf conf) throws IOException { // add hive-exec jar addJars((new JobConf(this.getClass())).getJar()); // add aux jars addJars(conf.getAuxJars()); addJars(SessionState.get() == null ? null : SessionState.get().getReloadableAuxJars()); // add added jars String addedJars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR); HiveConf.setVar(conf, HiveConf.ConfVars.HIVEADDEDJARS, addedJars); addJars(addedJars); // add plugin module jars on demand // jobConf will hold all the configuration for hadoop, tez, and hive JobConf jobConf = new JobConf(conf); jobConf.set(MR_JAR_PROPERTY, ""); for (BaseWork work : sparkWork.getAllWork()) { work.configureJobConf(jobConf); } addJars(jobConf.get(MR_JAR_PROPERTY)); // remove the location of container tokens conf.unset(MR_CREDENTIALS_LOCATION_PROPERTY); // add added files String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE); HiveConf.setVar(conf, HiveConf.ConfVars.HIVEADDEDFILES, addedFiles); addResources(addedFiles); // add added archives String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); HiveConf.setVar(conf, HiveConf.ConfVars.HIVEADDEDARCHIVES, addedArchives); addResources(addedArchives); }
commitTxnRequestMigr.setReplLastIdInfo(work.getReplLastIdInfo()); txnManager.replCommitTxn(commitTxnRequestMigr); conf.unset(ValidTxnList.VALID_TXNS_KEY); conf.unset(ReplUtils.REPL_CURRENT_TBL_WRITE_ID); LOG.info("Committed Migration Txn with replLastIdInfo: " + work.getReplLastIdInfo() + " for txnId: " + txnIdMigrationCommit);
/** The factory yields a filter only once the LDAP user-filter property is configured. */
@Test
public void testFactory() {
  // Unset: no filter instance.
  conf.unset(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER.varname);
  assertNull(factory.getInstance(conf));
  // Set: a filter instance is produced.
  conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER, "User1");
  assertNotNull(factory.getInstance(conf));
}
/** The factory yields a filter only once the custom-LDAP-query property is configured. */
@Test
public void testFactory() {
  // Unset: no filter instance.
  conf.unset(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY.varname);
  assertNull(factory.getInstance(conf));
  // Set: a filter instance is produced.
  conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY, CUSTOM_QUERY);
  assertNotNull(factory.getInstance(conf));
}
conf.unset("tez.queue.name"); poolManager.reopen(session); assertEquals("default", poolManager.getSession(null, conf, false, false).getQueueName()); conf.unset("tez.queue.name"); conf.unset("tez.queue.name"); poolManager.reopen(session); assertEquals("tezq1", poolManager.getSession(null, conf, false, false).getQueueName());
conf.unset(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname);
assertEquals("year=2018", res2.get(1)); } finally { conf.unset(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname);
conf.unset(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname);
/** * Test HIVE-16395 - by default we force cloning of Configurations for Spark jobs */ @Test public void testForceConfCloning() throws Exception { HiveConf conf = getHiveConf(); String sparkCloneConfiguration = HiveSparkClientFactory.SPARK_CLONE_CONFIGURATION; // Clear the value of sparkCloneConfiguration conf.unset(sparkCloneConfiguration); assertNull( "Could not clear " + sparkCloneConfiguration + " in HiveConf", conf.get(sparkCloneConfiguration)); // By default we should set sparkCloneConfiguration to true in the Spark config checkSparkConf(conf, sparkCloneConfiguration, "true"); // User can override value for sparkCloneConfiguration in Hive config to false conf.set(sparkCloneConfiguration, "false"); checkSparkConf(conf, sparkCloneConfiguration, "false"); // User can override value of sparkCloneConfiguration in Hive config to true conf.set(sparkCloneConfiguration, "true"); checkSparkConf(conf, sparkCloneConfiguration, "true"); }
conf.unset(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.varname); connection.close();