/**
 * Sets an int-valued configuration variable on this instance.
 * Convenience overload that delegates to the static
 * {@code setIntVar(Configuration, ConfVars, int)} with {@code this} as the target.
 *
 * @param var the configuration variable to set
 * @param val the integer value to assign
 */
public void setIntVar(ConfVars var, int val) { setIntVar(this, var, val); }
/**
 * Builds the parameterized fixture: configures how many threads the
 * file-move machinery may use and records whether the copy source lives
 * on the local file system.
 *
 * @param threadCount   value assigned to HIVE_MOVE_FILES_THREAD_COUNT
 * @param isSourceLocal true when source files are on the local FS
 */
public TestHiveCopyFiles(int threadCount, boolean isSourceLocal) {
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_MOVE_FILES_THREAD_COUNT, threadCount);
  this.isSourceLocal = isSourceLocal;
}
/**
 * Sets the acidOperationalProperties in the configuration object argument.
 *
 * @param conf       mutable configuration object to update
 * @param properties an AcidOperationalProperties object to initialize from;
 *                   {@code null} leaves the configuration untouched
 */
public static void setAcidOperationalProperties(Configuration conf,
    AcidOperationalProperties properties) {
  // Nothing to encode when no properties were supplied.
  if (properties == null) {
    return;
  }
  HiveConf.setIntVar(conf, ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES, properties.toInt());
}
/**
 * Fixture constructor: seeds the transactional metastore configuration and
 * shrinks the direct-SQL limits so the code under test is forced onto its
 * multi-query path, then clears any leftover database state.
 *
 * @throws Exception if resetting the transaction tables fails
 */
public TestCompactionTxnHandler() throws Exception {
  TxnDbUtil.setConfValues(conf);
  // Deliberately tiny limits so TxnUtils.buildQueryWithINClauseStrings()
  // has to split its work across multiple queries.
  conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1);
  conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
  tearDown();
}
/**
 * Creates a HiveConf wired for a SASL-secured remote metastore.
 *
 * @param metaStoreURI           thrift URI of the metastore to connect to
 * @param hiveMetaStorePrincipal Kerberos principal of the metastore service
 * @return a fully configured HiveConf
 * @throws IOException declared for callers; this body does not throw it directly
 */
public HiveConf createHiveConf(String metaStoreURI, String hiveMetaStorePrincipal)
    throws IOException {
  HiveConf conf = new HiveConf();
  conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI);
  conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  // Talk SASL to the (kerberized) metastore.
  conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
  conf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, hiveMetaStorePrincipal);
  return conf;
}
/**
 * Boots an in-process metastore test fixture. Installs a security manager that
 * converts System.exit into an exception (so the embedded metastore cannot kill
 * the test JVM) and builds a HiveConf pointing at the local thrift port with
 * hooks and concurrency disabled.
 */
public LocalMetaServer() {
  // Keep the original manager so it can be restored later.
  securityManager = System.getSecurityManager();
  System.setSecurityManager(new NoExitSecurityManager());
  hiveConf = new HiveConf(TestHiveClientCache.class);
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + MS_PORT);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
  hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
      HCatSemanticAnalyzer.class.getName());
  // Disable pre/post execution hooks and concurrency for the test run.
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
}
@BeforeClass public static void setUp() throws Exception { LOG.debug("Setting up output service"); Configuration conf = new Configuration(); // Pick random avail port HiveConf.setIntVar(conf, HiveConf.ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT, 0); HiveConf.setBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW, false); LlapOutputFormatService.initializeAndStart(conf, null); service = LlapOutputFormatService.get(); LlapProxy.setDaemon(true); LOG.debug("Output service up"); }
/**
 * Builds a fresh HiveConf before each test: a binary-mode HiveServer2 on
 * localhost:15000, no doAs, no authentication, small retry limits, a modest
 * async pool, and the SQL-standard authorizer.
 */
@Before
public void init() {
  hiveConf = new HiveConf();
  // Server endpoint and transport.
  hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 15000);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
  // Security: run as the server user, no auth handshake.
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION,
      HiveAuthConstants.AuthTypes.NONE.toString());
  // Client retry behaviour and async execution sizing.
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS, 10);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
}
/**
 * Verifies that with a high point-lookup threshold an IN clause is rewritten
 * into an OR-of-equalities, by inspecting the filter operator's predicate
 * signature in the plan mapper.
 *
 * @throws ParseException if the explain query fails to parse
 */
@Test
public void testInConversion() throws ParseException {
  String query = "explain select sum(id_uv) from tu where u in (1,2) group by u";
  HiveConf conf = env_setup.getTestCtx().hiveConf;
  // Threshold above the IN-list size, so the OR rewrite (not a struct IN) applies.
  conf.setIntVar(ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN, 10);
  IDriver driver = createDriver();
  PlanMapper planMapper = getMapperForQuery(driver, query);
  List<FilterOperator> filterOps = planMapper.getAll(FilterOperator.class);
  OpTreeSignature signature = planMapper.lookup(OpTreeSignature.class, filterOps.get(0));
  Object predicate = signature.getSig().getSigMap().get("getPredicateString");
  assertEquals("((u = 1) or (u = 2)) (type: boolean)", predicate);
}
@Test public void testSingleThreadedCheckMetastore() throws HiveException, AlreadyExistsException, IOException, MetastoreException { // set num of threads to 0 so that single-threaded checkMetastore is called hive.getConf().setIntVar(HiveConf.ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT, 0); Table testTable = createPartitionedTestTable(dbName, tableName, 2, 0); // add 10 partitions on the filesystem createPartitionsDirectoriesOnFS(testTable, 10); CheckResult result = new CheckResult(); checker.checkMetastore(catName, dbName, tableName, null, result); assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs()); assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs()); assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs()); assertEquals(10, result.getPartitionsNotInMs().size()); }
/**
 * With parallel compilation on and an unbounded quota (-1), ten concurrent
 * compilations should all run in parallel: nothing waits, nothing times out,
 * and concurrent compilation is observed.
 */
@Test
public void testParallelCompilationWithUnboundedQuota() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  // -1 means no limit on concurrent compilations.
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, -1);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsZero(responses);
  verifyThatConcurrentCompilationWasIndeed(responses);
}
/**
 * With an unbounded quota but all queries issued from a single session,
 * compilations must still serialize per-session: no waiting, no timeouts,
 * and no concurrent compilation observed.
 */
@Test
public void testParallelCompilationWithUnboundedQuotaAndSingleSession() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  // -1 means no global limit; serialization comes from the single session.
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, -1);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(true, 10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsZero(responses);
  verifyThatNoConcurrentCompilationWasIndeed(responses);
}
/**
 * One-time class setup: builds a HiveConf with a small MSCK repair batch size
 * (5) so batching logic is exercised, starts a session, and initializes the
 * metastore client and the Msck under test.
 *
 * @throws Exception if session or metastore client initialization fails
 */
@BeforeClass
public static void setupClass() throws Exception {
  hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class);
  // Small batch size forces multiple batches in the tests.
  hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  SessionState.start(hiveConf);
  db = new HiveMetaStoreClient(hiveConf);
  msck = new Msck( false, false);
  msck.init(hiveConf);
}
/**
 * With a quota of one compile slot and a one-second lock timeout, ten
 * concurrent compilations must produce at least one timeout while no
 * operation is left waiting.
 */
@Test
public void testParallelCompilationTimeoutWithSingleQuota() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 1);
  // Short timeout so queued compilations give up quickly.
  conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 1, TimeUnit.SECONDS);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsNotZero(responses);
}
/**
 * With a quota of two compile slots, ten compilations should complete with no
 * waiters and no timeouts, and concurrent compilation should be observed.
 */
@Test
public void testParallelCompilationWithMultipleQuotas() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 2);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsZero(responses);
  verifyThatConcurrentCompilationWasIndeed(responses);
}
/**
 * With parallel compilation enabled but a quota of one slot, compilations
 * serialize: no waiters, no timeouts, and no concurrent compilation observed.
 */
@Test
public void testParallelCompilationWithSingleQuota() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  // One slot: concurrency is enabled yet effectively serialized.
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 1);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsZero(responses);
  verifyThatNoConcurrentCompilationWasIndeed(responses);
}
/**
 * Test that checks that the queries above the quota are timed out, so the
 * compilation quota maximum is honored: with 4 slots, a 1-second timeout and
 * 10 long-running queries, exactly 6 compilations should time out.
 *
 * @throws Exception on driver or compilation failure
 */
@Test
public void testParallelCompilationTimeoutWithMultipleQuota() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 4);
  conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 1, TimeUnit.SECONDS);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(LONG_QUERY, 10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  // 10 queries minus 4 slots = 6 expected timeouts.
  verifyThatTimedOutCompileOpsCount(responses, 6);
}
/**
 * With a single compile slot and a zero lock timeout (wait forever),
 * compilations serialize without any timeouts or waiters, and no concurrent
 * compilation is observed.
 */
@Test
public void testParallelCompilationWithSingleQuotaAndZeroTimeout() throws Exception {
  conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, true);
  conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 1);
  // Zero timeout: queued compilations block until a slot frees up.
  conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 0, TimeUnit.SECONDS);
  initDriver(conf, 10);
  List<CommandProcessorResponse> responses = compileAndRespond(10);
  verifyThatWaitingCompileOpsCountIsEqualTo(0);
  verifyThatTimedOutCompileOpsCountIsZero(responses);
  verifyThatNoConcurrentCompilationWasIndeed(responses);
}
/**
 * One-time class setup: builds a HiveConf with a small MSCK repair batch size
 * (5), starts a session, connects a metastore client (wrapping any
 * MetaException into HiveException), and initializes the Msck under test.
 *
 * @throws HiveException if the metastore client cannot be created
 * @throws MetaException if Msck initialization fails
 */
@BeforeClass
public static void setupClass() throws HiveException, MetaException {
  hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class);
  // Small batch size forces multiple batches in the tests.
  hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  SessionState.start(hiveConf);
  try {
    db = new HiveMetaStoreClient(hiveConf);
  } catch (MetaException e) {
    // Surface client-construction failures as HiveException, preserving cause.
    throw new HiveException(e);
  }
  msck = new Msck( false, false);
  msck.init(hiveConf);
}
@Test public void testCacheHit() throws IOException, MetaException, LoginException { HiveClientCache cache = new HiveClientCache(1000); HiveClientCache.ICacheableMetaStoreClient client = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); assertNotNull(client); client.close(); // close shouldn't matter // Setting a non important configuration should return the same client only hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10); HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); assertNotNull(client2); assertSame(client, client2); assertEquals(client.getUsers(), client2.getUsers()); client2.close(); }