Refine search
/**
 * Convenience constructor: reads the metastore client-cache tuning knobs
 * (expiry, initial/max capacity, stats flag) from the given HiveConf and
 * delegates to the full constructor.
 */
public HiveClientCache(HiveConf hiveConf) {
  this((int) HiveConf.getTimeVar(hiveConf,
          HiveConf.ConfVars.METASTORE_CLIENT_CACHE_EXPIRY_TIME, TimeUnit.SECONDS),
      HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_INITIAL_CAPACITY),
      HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_MAX_CAPACITY),
      HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_STATS_ENABLED));
}
/**
 * Builds a fixed (static host list) LLAP registry from configuration.
 * The comma-separated {@code hosts} string supplies the daemon host names;
 * all ports and capacities come from the LLAP config vars.
 */
public LlapFixedRegistryImpl(String hosts, Configuration conf) {
  this.hosts = hosts.split(",");
  this.port = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_RPC_PORT);
  this.shuffle = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT);
  this.resolveHosts = conf.getBoolean(FIXED_REGISTRY_RESOLVE_HOST_NAMES, true);
  this.mngPort = HiveConf.getIntVar(conf, ConfVars.LLAP_MANAGEMENT_RPC_PORT);
  this.outputFormatPort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT);
  this.webPort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_WEB_PORT);
  this.webScheme = HiveConf.getBoolVar(conf, ConfVars.LLAP_DAEMON_WEB_SSL) ? "https" : "http";
  // Capture every llap-prefixed property into the service record map.
  for (Map.Entry<String, String> entry : conf) {
    String key = entry.getKey();
    if (key.startsWith(HiveConf.PREFIX_LLAP) || key.startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
      // TODO: read this somewhere useful, like the task scheduler
      srv.put(key, entry.getValue());
    }
  }
  this.memory = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB);
  this.vcores = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
}
/**
 * Reads the LLAP daemon sizing and IO settings from {@code daemonConf} and
 * delegates to the full {@code initialize} overload.
 *
 * @param appName    application name passed through to the main initializer
 * @param daemonConf daemon configuration to read the LLAP settings from
 */
public static void initialize(String appName, Configuration daemonConf) {
  int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
  // Config value is in MB; convert to bytes. Fix: use an uppercase 'L' suffix —
  // the original lowercase 'l' is easily misread as the digit 1.
  long executorMemoryBytes =
      HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB) * 1024L * 1024L;
  long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
  boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
  boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
  // NOTE(review): presumably JVM_PID is exported by the daemon launch scripts;
  // it may be null if launched differently — confirm downstream handles that.
  String pid = System.getenv("JVM_PID");
  initialize(appName, numExecutors, executorMemoryBytes, ioMemoryBytes, isDirectCache, isLlapIo, pid);
}
/**
 * Convenience constructor: pulls all hybrid-grace hash join tuning parameters
 * from the configuration and delegates to the full constructor.
 */
public HybridHashTableContainer(Configuration hconf, long keyCount, long memoryAvailable,
    long estimatedTableSize, HybridHashTableConf nwayConf)
    throws SerDeException, IOException {
  this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD),
      HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS),
      HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT),
      HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINBLOOMFILTER),
      estimatedTableSize, keyCount, memoryAvailable, nwayConf,
      HiveUtils.getLocalDirList(hconf));
}
/**
 * Performs variable substitution on {@code expr} up to the configured depth.
 * Returns the expression unchanged when it is null or when substitution is
 * disabled via hive config.
 */
public String substitute(HiveConf conf, String expr) {
  if (expr == null) {
    return expr;
  }
  // Substitution must be explicitly enabled; otherwise pass the text through.
  if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTE)) {
    return expr;
  }
  l4j.debug("Substitution is on: " + expr);
  int depth = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTEDEPTH);
  return substitute(conf, expr, depth);
}
// NOTE(review): the brace below closed the enclosing class in the original snippet.
}
/**
 * Builds the web endpoint for LLAP services on this host, using the
 * configured daemon web port and http/https depending on the SSL setting.
 *
 * @throws RuntimeException if the resulting URL/URI is malformed
 */
public Endpoint getServicesEndpoint() {
  final int servicePort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_WEB_PORT);
  final String scheme =
      HiveConf.getBoolVar(conf, ConfVars.LLAP_DAEMON_WEB_SSL) ? "https" : "http";
  try {
    final URL serviceURL = new URL(scheme, hostname, servicePort, "");
    return RegistryTypeUtils.webEndpoint(IPC_SERVICES, serviceURL.toURI());
  } catch (MalformedURLException e) {
    throw new RuntimeException(e);
  } catch (URISyntaxException e) {
    throw new RuntimeException("llap service URI for " + hostname + " is invalid", e);
  }
}
/**
 * Reads every hybrid-grace hash join knob (key count adjustment, thresholds,
 * load factor, write-buffer sizes, partition minimum, probe percent, bloom
 * filter flag) from {@code hconf} and forwards to the primary constructor
 * together with the caller-supplied size/memory estimates.
 */
public HybridHashTableContainer(Configuration hconf, long keyCount, long memoryAvailable,
    long estimatedTableSize, HybridHashTableConf nwayConf)
    throws SerDeException, IOException {
  this(
      HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD),
      HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE),
      HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS),
      HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT),
      HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINBLOOMFILTER),
      estimatedTableSize,
      keyCount,
      memoryAvailable,
      nwayConf,
      HiveUtils.getLocalDirList(hconf));
}
/**
 * Initializes the LLAP decision dispatcher from the compile-time config:
 * UDF-check and permanent-function policies, uber eligibility, and the
 * reducer/executor sizing hints used by the rules.
 */
public LlapDecisionDispatcher(PhysicalContext pctx, LlapMode mode) {
  conf = pctx.getConf();
  doSkipUdfCheck = HiveConf.getBoolVar(conf, ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK);
  arePermanentFnsAllowed = HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOW_PERMANENT_FNS);
  // Don't use uber in "all" mode - everything can go into LLAP, which is better than uber.
  shouldUber = HiveConf.getBoolVar(conf, ConfVars.LLAP_AUTO_ALLOW_UBER) && (mode != all);
  minReducersPerExec =
      HiveConf.getFloatVar(conf, ConfVars.TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR);
  executorsPerNode = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
  mapJoinOpList = new ArrayList<>();
  rules = getRules();
}
/**
 * Chooses the DDL metadata formatter: JSON when hive.ddl.output.format is
 * "json", otherwise the plain-text formatter configured with the pretty-output
 * column count and the partition-column display flag.
 */
public static MetaDataFormatter getFormatter(HiveConf conf) {
  String outputFormat = conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text");
  if ("json".equals(outputFormat)) {
    return new JsonMetaDataFormatter();
  }
  return new TextMetaDataFormatter(
      conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS),
      conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY));
}
/**
 * Creates a fixed-size pool of reusable {@link IoTrace} objects, sized to the
 * configured LLAP IO thread-pool size. Each trace is created with the
 * configured buffer size and always-dump flag; traces are reset before being
 * returned to the pool.
 */
public static FixedSizedObjectPool<IoTrace> createTracePool(Configuration conf) {
  final int ioTraceSize = (int) HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_TRACE_SIZE);
  final boolean isAlwaysDump = HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_TRACE_ALWAYS_DUMP);
  int ioThreads = HiveConf.getIntVar(conf, ConfVars.LLAP_IO_THREADPOOL_SIZE);
  return new FixedSizedObjectPool<>(ioThreads, new Pool.PoolObjectHelper<IoTrace>() {
    @Override
    public IoTrace create() {
      return new IoTrace(ioTraceSize, isAlwaysDump);
    }

    @Override
    public void resetBeforeOffer(IoTrace t) {
      t.reset();
    }
  });
}
// NOTE(review): the brace below closed the enclosing class in the original snippet.
}
/**
 * Chooses the DDL metadata formatter based on hive.ddl.output.format:
 * "json" selects the JSON formatter; anything else (default "text") selects
 * the plain-text formatter configured with the pretty-output column count and
 * the partition-column display flag.
 */
public static MetaDataFormatter getFormatter(HiveConf conf) {
  if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) {
    return new JsonMetaDataFormatter();
  } else {
    return new TextMetaDataFormatter(conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS),
        conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY));
  }
}
// NOTE(review): the brace below closed the enclosing class in the original snippet.
}
/**
 * Initializes non-pooled session management from {@code conf}: the LLAP
 * concurrency semaphore, the custom-queue policy, the optional YARN queue
 * access checker, and the restricted-config checker.
 *
 * @throws RuntimeException if the configured custom-queue-allowed value is not
 *     a valid {@code CustomQueueAllowed} constant
 */
public void setupNonPool(HiveConf conf) {
  this.initConf = conf;
  numConcurrentLlapQueries = conf.getIntVar(ConfVars.HIVE_SERVER2_LLAP_CONCURRENT_QUERIES);
  // Fair semaphore so queued LLAP queries are admitted in arrival order.
  llapQueue = new Semaphore(numConcurrentLlapQueries, true);
  String queueAllowedStr =
      HiveConf.getVar(conf, ConfVars.HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED);
  try {
    this.customQueueAllowed = CustomQueueAllowed.valueOf(queueAllowedStr.toUpperCase());
  } catch (Exception ex) {
    // Fix: propagate the original exception as the cause instead of discarding
    // it, so the misconfiguration is diagnosable from the stack trace.
    throw new RuntimeException("Invalid value '" + queueAllowedStr + "' for "
        + ConfVars.HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED.varname, ex);
  }
  if (customQueueAllowed == CustomQueueAllowed.TRUE
      && HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK)) {
    this.yarnQueueChecker = new YarnQueueHelper(conf);
  }
  restrictedConfig = new RestrictedConfigChecker(conf);
}
/**
 * Convenience constructor: reads every allocator tuning knob (direct/mapped
 * mode, min/max allocation sizes, arena count, total memory, defrag headroom,
 * mapped path, discard method, preallocation flag) from {@code conf} and
 * delegates to the full constructor.
 */
public BuddyAllocator(Configuration conf, MemoryManager mm, LlapDaemonCacheMetrics metrics) {
  this(HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_DIRECT),
      HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_MAPPED),
      (int) HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
      (int) HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC),
      HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT),
      getMaxTotalMemorySize(conf),
      HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_DEFRAG_HEADROOM),
      HiveConf.getVar(conf, ConfVars.LLAP_ALLOCATOR_MAPPED_PATH),
      mm,
      metrics,
      HiveConf.getVar(conf, ConfVars.LLAP_ALLOCATOR_DISCARD_METHOD),
      HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_PREALLOCATE));
}
/**
 * Selects the compile lock appropriate for the current configuration and wraps
 * it in a {@code CompileLock} with the configured acquisition timeout.
 *
 * NOTE(review): {@code conf} is null-guarded when reading the parallel
 * compilation flag, but {@code HiveConf.getTimeVar(conf, ...)} below is still
 * invoked unconditionally — confirm callers never pass a null conf, otherwise
 * this throws NPE there.
 */
public static CompileLock newInstance(HiveConf conf, String command) {
  // Default: a single global serializable lock (no parallel compilation).
  Lock underlying = SERIALIZABLE_COMPILE_LOCK;
  boolean isParallelEnabled = (conf != null)
      && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION);
  if (isParallelEnabled) {
    int compileQuota =
        HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT);
    // Positive quota: shared bounded lock; otherwise a per-session lock.
    underlying = (compileQuota > 0) ? SessionWithQuotaCompileLock.instance
        : SessionState.get().getCompileLock();
  }
  long timeout = HiveConf.getTimeVar(conf,
      HiveConf.ConfVars.HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, TimeUnit.SECONDS);
  return new CompileLock(underlying, timeout, command);
}
Integer.toString(hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT))); confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH.varname, hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH)); } else { confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, Integer.toString(hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT))); confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP.varname, hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP)); Boolean.toString(hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)));
/**
 * Captures the correlation-optimizer settings for this compilation. When the
 * query writes to transactional (ACID) tables, the dedup min-reducer count is
 * forced to 1 and the affected tables are logged.
 */
public AbstractCorrelationProcCtx(ParseContext pctx) {
  removedOps = new HashSet<>();
  trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST);
  if (pctx.hasAcidWrite()) {
    // Collect "db.table" names of all ACID sinks for the log message below.
    StringBuilder tblNames = new StringBuilder();
    for (FileSinkDesc fsd : pctx.getAcidSinks()) {
      if (fsd.getTable() != null) {
        tblNames.append(fsd.getTable().getDbName())
            .append('.')
            .append(fsd.getTable().getTableName())
            .append(',');
      }
    }
    if (tblNames.length() > 0) {
      tblNames.setLength(tblNames.length() - 1); // drop trailing ','
    }
    LOG.info("Overriding " + HIVEOPTREDUCEDEDUPLICATIONMINREDUCER + " to 1 due to a write to transactional table(s) " + tblNames);
    minReducer = 1;
  } else {
    minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER);
  }
  isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE);
  this.pctx = pctx;
}
@Override public synchronized void init(HiveConf hiveConf) { this.hiveConf = hiveConf; //Create operation log root directory, if operation logging is enabled if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { initOperationLogRootDir(); } createBackgroundOperationPool(); addService(operationManager); initSessionImplClassName(); Metrics metrics = MetricsFactory.getInstance(); if(metrics != null){ registerOpenSesssionMetrics(metrics); registerActiveSesssionMetrics(metrics); } userLimit = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER); ipAddressLimit = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS); userIpAddressLimit = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS); LOG.info("Connections limit are user: {} ipaddress: {} user-ipaddress: {}", userLimit, ipAddressLimit, userIpAddressLimit); super.init(hiveConf); }
/**
 * Captures the correlation-optimizer settings for this compilation. When the
 * query writes to transactional (ACID) tables, the dedup min-reducer count is
 * forced to 1 and the affected tables are logged.
 */
public AbstractCorrelationProcCtx(ParseContext pctx) {
  removedOps = new HashSet<Operator<?>>();
  trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST);
  if(pctx.hasAcidWrite()) {
    // Collect "db.table" names of all ACID sinks for the log message below.
    StringBuilder tblNames = new StringBuilder();
    for(FileSinkDesc fsd : pctx.getAcidSinks()) {
      if(fsd.getTable() != null) {
        tblNames.append(fsd.getTable().getDbName()).append('.').append(fsd.getTable().getTableName()).append(',');
      }
    }
    if(tblNames.length() > 0) {
      tblNames.setLength(tblNames.length() - 1);//trailing ','
    }
    LOG.info("Overriding " + HIVEOPTREDUCEDEDUPLICATIONMINREDUCER + " to 1 due to a write to transactional table(s) " + tblNames);
    minReducer = 1;
  } else {
    minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER);
  }
  isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE);
  this.pctx = pctx;
}
private void createBackgroundOperationPool() { int poolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS); LOG.info("HiveServer2: Background operation thread pool size: " + poolSize); int poolQueueSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE); LOG.info("HiveServer2: Background operation thread wait queue size: " + poolQueueSize); long keepAliveTime = HiveConf.getTimeVar( sessionTimeout = HiveConf.getTimeVar( hiveConf, ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT, TimeUnit.MILLISECONDS); checkOperation = HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION);
MapWork work, boolean finalMapRed) throws IOException { long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); boolean powersOfTwo = conf.getBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO) && finalMapRed && !work.getBucketedColsByDirectory().isEmpty();