/**
 * Builds a ReplicationSpec that covers exactly one event, using the event id
 * as both the "from" and "to" boundary, and marks it as an incremental dump.
 *
 * @param eventId the notification event id this spec describes
 * @return a ReplicationSpec of type INCREMENTAL_DUMP for the single event
 */
private ReplicationSpec getNewEventOnlyReplicationSpec(Long eventId) {
  // Same id for both boundaries: the spec spans only this one event.
  String eventBoundary = eventId.toString();
  boolean metadataOnly =
      conf.getBoolean(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY.varname, false);
  ReplicationSpec spec = getNewReplicationSpec(eventBoundary, eventBoundary, metadataOnly);
  spec.setReplSpecType(ReplicationSpec.Type.INCREMENTAL_DUMP);
  return spec;
}
/**
 * Creates a client cache whose sizing and stats settings come from the given
 * Hive configuration, falling back to the DEFAULT_* constants when a key is unset.
 *
 * @param hiveConf configuration supplying cache expiry, capacities and stats flag
 */
public HiveClientCache(HiveConf hiveConf) {
  this(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
          DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS),      // entry expiry (seconds)
      hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_INITIAL_CAPACITY,
          DEFAULT_HIVE_CACHE_INITIAL_CAPACITY),         // initial cache capacity
      hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_MAX_CAPACITY,
          DEFAULT_HIVE_CACHE_MAX_CAPACITY),             // maximum cache capacity
      hiveConf.getBoolean(HCatConstants.HCAT_HIVE_CLIENT_CACHE_STATS_ENABLED,
          DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED));    // whether to record cache stats
}
public void run(SessionState sess, Set<ReadEntity> inputs, Set<WriteEntity> outputs, UserGroupInformation ugi, boolean isExplain) throws Exception { // Don't enforce during test driver setup or shutdown. if (sess.getConf().getBoolean("hive.test.init.phase", false) || sess.getConf().getBoolean("hive.test.shutdown.phase", false)) { return; } List<String> readOnlyTables = Arrays.asList(System.getProperty("test.src.tables").split(",")); for (WriteEntity w: outputs) { if ((w.getTyp() == WriteEntity.Type.TABLE) || (w.getTyp() == WriteEntity.Type.PARTITION)) { Table t = w.getTable(); if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(t.getDbName()) && readOnlyTables.contains(t.getTableName()) && !isExplain) { throw new RuntimeException ("Cannot overwrite read-only table: " + t.getTableName()); } } } } }
/**
 * Decides how many executors to pre-warm, capped by what Spark will actually
 * launch. Please note that the method is very tied with Spark documentation
 * 1.4.1 regarding dynamic allocation, such as default values.
 *
 * @return the number of executors to wait for during pre-warming
 */
private int getExecutorsToWarm() {
  int prewarmTarget = HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
  if (hiveConf.getBoolean("spark.dynamicAllocation.enabled", false)) {
    // Dynamic allocation: Spark starts with initialExecutors, which defaults
    // to minExecutors (itself defaulting to 0).
    int minExecs = sparkConf.getInt("spark.dynamicAllocation.minExecutors", 0);
    int initialExecs = sparkConf.getInt("spark.dynamicAllocation.initialExecutors", minExecs);
    return Math.min(prewarmTarget, initialExecs);
  }
  // Static allocation: Spark's documented default executor count is 2.
  return Math.min(prewarmTarget, sparkConf.getInt("spark.executor.instances", 2));
}
/**
 * Get or create a hive client depending on whether it exists in cache or not.
 *
 * @param hiveConf The hive configuration
 * @return the client (cached unless HCAT_HIVE_CLIENT_DISABLE_CACHE is set)
 * @throws MetaException When HiveMetaStoreClient couldn't be created
 * @throws IOException when UGI lookup for the caller fails (wraps LoginException)
 */
public static IMetaStoreClient getHiveMetastoreClient(HiveConf hiveConf) throws MetaException, IOException {
  if (hiveConf.getBoolean(HCatConstants.HCAT_HIVE_CLIENT_DISABLE_CACHE, false)){
    // If cache is disabled, don't use it.
    return HiveClientCache.getNonCachedHiveMetastoreClient(hiveConf);
  }
  // Singleton behaviour: create the cache instance if required.
  // NOTE(review): this is double-checked locking; it is only safe if the
  // hiveClientCache field (declared elsewhere) is volatile — TODO confirm.
  // NOTE(review): the lock object IMetaStoreClient.class is publicly
  // reachable, so unrelated code could contend on or hold this lock.
  if (hiveClientCache == null) {
    synchronized (IMetaStoreClient.class) {
      if (hiveClientCache == null) {
        // First caller's configuration sizes the cache for all later callers.
        hiveClientCache = new HiveClientCache(hiveConf);
      }
    }
  }
  try {
    return hiveClientCache.get(hiveConf);
  } catch (LoginException e) {
    // Surface UGI/login problems as IOException per this method's contract.
    throw new IOException("Couldn't create hiveMetaStoreClient, Error getting UGI for user", e);
  }
}
/**
 * Computes the executor count to pre-warm. Please note that the method is very
 * tied with Spark documentation 1.4.1 regarding dynamic allocation, such as
 * default values.
 *
 * @return the smaller of the configured pre-warm target and Spark's startup executor count
 */
private int getExecutorsToWarm() {
  int configuredTarget = HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
  boolean dynamic = hiveConf.getBoolean("spark.dynamicAllocation.enabled", false);
  int sparkStartupExecutors;
  if (dynamic) {
    // initialExecutors defaults to minExecutors, which defaults to 0.
    int floor = sparkConf.getInt("spark.dynamicAllocation.minExecutors", 0);
    sparkStartupExecutors = sparkConf.getInt("spark.dynamicAllocation.initialExecutors", floor);
  } else {
    // Spark's documented default for static allocation is 2 executors.
    sparkStartupExecutors = sparkConf.getInt("spark.executor.instances", 2);
  }
  return Math.min(configuredTarget, sparkStartupExecutors);
}
public void run(SessionState sess, Set<ReadEntity> inputs, Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception { // Don't enforce during test driver setup or shutdown. if (sess.getConf().getBoolean("hive.test.init.phase", false) || sess.getConf().getBoolean("hive.test.shutdown.phase", false)) { return; } for (WriteEntity w: outputs) { if ((w.getTyp() == WriteEntity.Type.TABLE) || (w.getTyp() == WriteEntity.Type.PARTITION)) { Table t = w.getTable(); if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(t.getDbName()) && READ_ONLY_TABLES.contains(t.getTableName())) { throw new RuntimeException ("Cannot overwrite read-only table: " + t.getTableName()); } } } } }
return; if (context.getConf().getBoolean(SPARK_DYNAMIC_ALLOCATION_ENABLED, false)) {
if (hiveConf.getBoolean(ConfVars.HIVE_SERVER2_XSRF_FILTER_ENABLED.varname,false)){ boolean continueProcessing = Utils.doXsrfFilter(request,response,null,null); if (!continueProcessing){
return; if (context.getConf().getBoolean(SPARK_DYNAMIC_ALLOCATION_ENABLED, false)) {
if (hiveConf.getBoolean(ConfVars.HIVE_SERVER2_XSRF_FILTER_ENABLED.varname, false)) {
/**
 * Verifies that stripConfigurations() also blanks values that are read through
 * typed accessors: after stripping, getBoolean() falls back to its default and
 * the raw value is the empty string.
 */
@Test
public void testHideNonStringVar() throws Exception {
  final String key = "dummyBoolean";
  // Precondition: the var is set and reads back as boolean true.
  Assert.assertTrue(conf.getBoolean(key, false));
  Assert.assertEquals("true", conf.get(key));
  HiveConfUtil.stripConfigurations(conf, Sets.newHashSet(key));
  // After stripping: typed read yields the default, raw read yields "".
  Assert.assertFalse(conf.getBoolean(key, false));
  Assert.assertEquals("", conf.get(key));
}
boolean useTsStats = context.getConf().getBoolean(HiveConf.ConfVars.SPARK_USE_TS_STATS_FOR_MAPJOIN.varname, false);
boolean useTsStats = context.getConf().getBoolean(HiveConf.ConfVars.SPARK_USE_FILE_SIZE_FOR_MAPJOIN.varname, false); boolean hasUpstreamSinks = false;
/**
 * Reports whether the data-completeness check is enabled, resolving the value
 * from configuration on first use and caching it thereafter.
 *
 * @return true if the data-completeness check is enabled
 */
public boolean isDataCompletenessCheckEnabled() {
  Boolean cached = isDataCompletenessCheckEnabled;
  if (cached == null) {
    // Lazily resolve once; subsequent calls reuse the cached Boolean.
    cached = config.getBoolean(LensConfConstants.ENABLE_DATACOMPLETENESS_CHECK,
        LensConfConstants.DEFAULT_ENABLE_DATACOMPLETENESS_CHECK);
    isDataCompletenessCheckEnabled = cached;
  }
  return cached;
}
/**
 * Lazily initializes and returns the data-completeness-check flag from
 * configuration. The first call reads the config; later calls return the
 * memoized value.
 *
 * @return true if the data-completeness check is enabled
 */
public boolean isDataCompletenessCheckEnabled() {
  if (isDataCompletenessCheckEnabled != null) {
    return isDataCompletenessCheckEnabled;
  }
  isDataCompletenessCheckEnabled =
      config.getBoolean(LensConfConstants.ENABLE_DATACOMPLETENESS_CHECK,
          LensConfConstants.DEFAULT_ENABLE_DATACOMPLETENESS_CHECK);
  return isDataCompletenessCheckEnabled;
}
/**
 * Reports whether metastore schema authorization checks are enabled, reading
 * the configuration once and memoizing the result.
 *
 * @return true if the authorization check is enabled
 */
private boolean isAuthorizationEnabled() {
  if (isAuthorizationCheckEnabled != null) {
    return isAuthorizationCheckEnabled;
  }
  isAuthorizationCheckEnabled =
      config.getBoolean(LensConfConstants.ENABLE_METASTORE_SCHEMA_AUTHORIZATION_CHECK,
          LensConfConstants.DEFAULT_ENABLE_METASTORE_SCHEMA_AUTHORIZATION_CHECK);
  return isAuthorizationCheckEnabled;
}
/**
 * Whether an in-process (local) metastore is permitted for this configuration.
 * Defaults to false when the flag is not set.
 *
 * @param conf configuration to consult
 * @return true if a local metastore is allowed
 */
private static boolean allowLocalMetaStore(HiveConf conf) {
  final boolean allowByDefault = false;
  return conf.getBoolean(ALLOW_LOCAL_METASTORE, allowByDefault);
}
/**
 * Constructs a metastore client with a private snapshot of the given
 * configuration.
 *
 * @param conf configuration to copy; caching defaults to enabled unless
 *             METASTORE_ENABLE_CACHING is explicitly set to false
 */
private CubeMetastoreClient(HiveConf conf) {
  // Defensive copy: later mutations by the caller must not leak into this client.
  HiveConf snapshot = new HiveConf(conf);
  this.config = snapshot;
  this.enableCaching = conf.getBoolean(MetastoreConstants.METASTORE_ENABLE_CACHING, true);
}
/**
 * Convenience constructor: reads expiry time, initial/max capacity and the
 * stats flag from the supplied HiveConf, using the class DEFAULT_* constants
 * for any key that is absent.
 *
 * @param hiveConf source of the cache tuning parameters
 */
public HiveClientCache(HiveConf hiveConf) {
  this(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
          DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS),      // how long entries live, in seconds
      hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_INITIAL_CAPACITY,
          DEFAULT_HIVE_CACHE_INITIAL_CAPACITY),         // starting capacity of the cache
      hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_MAX_CAPACITY,
          DEFAULT_HIVE_CACHE_MAX_CAPACITY),             // upper bound on cached clients
      hiveConf.getBoolean(HCatConstants.HCAT_HIVE_CLIENT_CACHE_STATS_ENABLED,
          DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED));    // enable cache statistics collection
}