/**
 * Builds the output-format service, holding on to the configuration and the shared
 * secret manager, and caching the writer timeout from
 * {@code LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT} in milliseconds.
 */
private LlapOutputFormatService(Configuration conf, SecretManager sm) throws IOException {
  this.conf = conf;
  this.sm = sm;
  this.writerTimeoutMs =
      HiveConf.getTimeVar(conf, ConfVars.LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
 * Creates the LLAP protocol client proxy.
 *
 * @param numThreads number of worker threads for the underlying async client
 * @param conf       configuration used to read connection timeout/retry-sleep values
 * @param llapToken  LLAP security token passed through to the base class (may be null
 *                   in unsecured clusters — TODO confirm against base-class contract)
 */
public LlapProtocolClientProxy(
    int numThreads, Configuration conf, Token<LlapTokenIdentifier> llapToken) {
  // We could pass in the number of nodes that we expect instead of -1.
  // Also, a single concurrent request per node is currently hardcoded.
  super(LlapProtocolClientProxy.class.getSimpleName(), numThreads, conf, llapToken,
      HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS,
          TimeUnit.MILLISECONDS),
      HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS,
          TimeUnit.MILLISECONDS),
      -1, 1);
}
/**
 * Returns the RPC handshake timeout in milliseconds: an explicit value from the
 * client config wins, otherwise the default HiveConf value is used.
 */
long getServerConnectTimeoutMs() {
  final String override = config.get(HiveConf.ConfVars.SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT.varname);
  if (override != null) {
    // Raw override is parsed as a plain millisecond count (no time-unit suffix).
    return Long.parseLong(override);
  }
  return DEFAULT_CONF.getTimeVar(
      HiveConf.ConfVars.SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
 * Returns the RPC connect timeout in milliseconds: an explicit value from the
 * client config wins, otherwise the default HiveConf value is used.
 */
long getConnectTimeoutMs() {
  final String override = config.get(HiveConf.ConfVars.SPARK_RPC_CLIENT_CONNECT_TIMEOUT.varname);
  if (override != null) {
    // Raw override is parsed as a plain millisecond count (no time-unit suffix).
    return Long.parseLong(override);
  }
  return DEFAULT_CONF.getTimeVar(
      HiveConf.ConfVars.SPARK_RPC_CLIENT_CONNECT_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
 * Factory for the session expiration tracker.
 *
 * @return a tracker configured from {@code HIVE_SERVER2_TEZ_SESSION_LIFETIME} and
 *         its jitter, or {@code null} when the lifetime is 0 (expiration disabled)
 */
public static SessionExpirationTracker create(HiveConf conf, RestartImpl restartImpl) {
  final long lifetimeMs =
      conf.getTimeVar(ConfVars.HIVE_SERVER2_TEZ_SESSION_LIFETIME, TimeUnit.MILLISECONDS);
  if (lifetimeMs == 0) {
    return null; // A zero lifetime means sessions never expire.
  }
  final long jitterMs =
      conf.getTimeVar(ConfVars.HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER, TimeUnit.MILLISECONDS);
  return new SessionExpirationTracker(lifetimeMs, jitterMs, restartImpl);
}
/**
 * Returns the Spark client future timeout in milliseconds: an explicit value from
 * the client config wins, otherwise the default HiveConf value is used.
 */
public long getFutureTimeoutMs() {
  final String override = config.get(HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT.varname);
  if (override != null) {
    // Raw override is parsed as a plain millisecond count (no time-unit suffix).
    return Long.parseLong(override);
  }
  return DEFAULT_CONF.getTimeVar(
      HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
 * Creates the LLAP plugin endpoint client.
 *
 * @param conf          configuration used for thread count and timeout/retry-sleep values
 * @param token         AM job token forwarded to the base class
 * @param expectedNodes expected number of AM nodes, used to size the base client
 */
public LlapPluginEndpointClientImpl(
    Configuration conf, Token<JobTokenIdentifier> token, int expectedNodes) {
  // A single concurrent request per node is currently hardcoded. The node includes a port number
  // so different AMs on the same host count as different nodes; we only have one request type,
  // and it is not useful to send more than one in parallel.
  super(LlapPluginEndpointClientImpl.class.getSimpleName(),
      HiveConf.getIntVar(conf, ConfVars.LLAP_PLUGIN_CLIENT_NUM_THREADS), conf, token,
      HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS,
          TimeUnit.MILLISECONDS),
      HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS,
          TimeUnit.MILLISECONDS),
      expectedNodes, 1);
}
/**
 * Initializes the compaction thread, then caches the compactor poll interval
 * ({@code HIVE_COMPACTOR_CHECK_INTERVAL}) normalized to milliseconds.
 */
@Override
public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception {
  super.init(stop, looped);
  final long intervalMs =
      conf.getTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL, TimeUnit.MILLISECONDS);
  checkInterval = intervalMs;
}
public LlapTaskUmbilicalExternalImpl(Configuration conf) { long taskInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS, TimeUnit.MILLISECONDS); // Setup timer task to check for hearbeat timeouts this.timer = new ScheduledThreadPoolExecutor(1); timer.scheduleAtFixedRate(new HeartbeatCheckTask(this), taskInterval, taskInterval, TimeUnit.MILLISECONDS); }
/**
 * Creates the retrying client wrapper, caching the retry limit and the delay
 * between retries (read as a time var and truncated to whole seconds).
 */
protected RetryingThriftCLIServiceClient(HiveConf conf) {
  this.conf = conf;
  retryDelaySeconds = (int) conf.getTimeVar(
      HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS, TimeUnit.SECONDS);
  retryLimit = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT);
}
/**
 * Stores the configuration and derives the replication dump root directory
 * ({@code REPLDIR}) and dump-dir TTL ({@code REPL_DUMPDIR_TTL}, in milliseconds).
 */
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  ttl = HiveConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS);
  dumpRoot = new Path(HiveConf.getVar(conf, ConfVars.REPLDIR));
}
/**
 * Creates a remote Spark client for the given session: caches configuration,
 * derives the SparkConf, and eagerly establishes the remote connection.
 *
 * @throws Exception if the remote client cannot be created
 */
RemoteHiveSparkClient(HiveConf hiveConf, Map<String, String> conf, String sessionId)
    throws Exception {
  this.hiveConf = hiveConf;
  this.conf = conf;
  this.sessionId = sessionId;
  // NOTE(review): the field name "sparkClientTimtout" is misspelled; fixing it
  // requires renaming its other uses elsewhere in this class.
  sparkClientTimtout =
      hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
  sparkConf = HiveSparkClientFactory.generateSparkConf(conf);
  createRemoteClient();
}
/**
 * Lazily wires up workload-management trigger validation: builds the session
 * trigger provider, the kill-action handler, and the validator runnable, then
 * starts the validator at the configured interval. No-op if already initialized.
 */
public void initTriggers(final HiveConf conf) {
  if (triggerValidatorRunnable != null) {
    return; // Already initialized.
  }
  final long validationIntervalMs = HiveConf.getTimeVar(
      conf, ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL, TimeUnit.MILLISECONDS);
  sessionTriggerProvider = new SessionTriggerProvider(openSessions, new LinkedList<>());
  triggerActionHandler = new KillTriggerActionHandler();
  triggerValidatorRunnable =
      new TriggerValidatorRunnable(sessionTriggerProvider, triggerActionHandler);
  startTriggerValidator(validationIntervalMs);
}
/**
 * Re-reads lock tuning from the current context configuration: sleep between
 * retries (milliseconds) and the retry counts for lock and unlock operations.
 */
@Override
public void refresh() {
  final HiveConf hiveConf = ctx.getConf();
  numRetriesForLock = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
  numRetriesForUnLock = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
  sleepTime = hiveConf.getTimeVar(
      HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
}
/**
 * Refreshes lock settings from the context configuration: the retry sleep
 * interval in milliseconds plus the lock/unlock retry counts.
 */
@Override
public void refresh() {
  final HiveConf current = ctx.getConf();
  sleepTime = current.getTimeVar(
      HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
  numRetriesForLock = current.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
  numRetriesForUnLock = current.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
}
/**
 * Lazily sets up per-pool trigger validation: builds the kill/move action handler
 * and the per-pool validator runnable, then starts it at the configured interval.
 * No-op if the validator already exists.
 */
private void initTriggers() {
  if (triggerValidatorRunnable != null) {
    return; // Already initialized.
  }
  final long validationIntervalMs = HiveConf.getTimeVar(
      conf, HiveConf.ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL, TimeUnit.MILLISECONDS);
  TriggerActionHandler<?> actionHandler = new KillMoveTriggerActionHandler(this);
  triggerValidatorRunnable = new PerPoolTriggerValidatorRunnable(
      perPoolProviders, actionHandler, validationIntervalMs);
  startTriggerValidator(validationIntervalMs);
}
/**
 * Recovers compactions abandoned by dead workers: optionally revokes entries
 * owned by workers on this host, then revokes any worker that has exceeded the
 * configured compactor worker timeout.
 *
 * @param remoteOnly when true, skip revoking this host's local workers
 * @throws MetaException on metastore failure
 */
private void recoverFailedCompactions(boolean remoteOnly) throws MetaException {
  if (!remoteOnly) {
    txnHandler.revokeFromLocalWorkers(Worker.hostname());
  }
  final long workerTimeoutMs = HiveConf.getTimeVar(
      conf, HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT, TimeUnit.MILLISECONDS);
  txnHandler.revokeTimedoutWorkers(workerTimeoutMs);
}
/**
 * Creates a client cache sized and tuned from HiveConf: expiry time (whole
 * seconds, truncated by the int cast), initial/max capacity, and whether cache
 * statistics are enabled.
 */
public HiveClientCache(HiveConf hiveConf) {
  this((int) HiveConf.getTimeVar(hiveConf,
          HiveConf.ConfVars.METASTORE_CLIENT_CACHE_EXPIRY_TIME, TimeUnit.SECONDS),
      HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_INITIAL_CAPACITY),
      HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_MAX_CAPACITY),
      HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.METASTORE_CLIENT_CACHE_STATS_ENABLED));
}
/**
 * Test fixture setup: opens a ZooKeeper client against the local test server
 * using the configured session timeout, and points the HiveConf quorum/port
 * settings at that server.
 */
@Override
public void beforeMethod(HiveTestEnvContext ctx) throws Exception {
  final int sessionTimeoutMs = (int) ctx.hiveConf.getTimeVar(
      HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
  // No-op watcher: the test only needs a connected client handle.
  zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeoutMs, event -> {
  });
  ctx.hiveConf.set("hive.zookeeper.quorum", "localhost");
  ctx.hiveConf.set("hive.zookeeper.client.port", "" + zkPort);
}
protected SparkJobMonitor(HiveConf hiveConf) { monitorTimeoutInterval = hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS); inPlaceUpdate = InPlaceUpdate.canRenderInPlace(hiveConf) && !SessionState.getConsole().getIsSilent(); console = new SessionState.LogHelper(LOG); updateFunction = updateFunction(); }