/**
 * Builds a shipper for a single WAL group, reading its retry and timeout tuning from the
 * supplied configuration.
 */
public ReplicationSourceShipper(Configuration conf, String walGroupId,
    PriorityBlockingQueue<Path> queue, ReplicationSource source) {
  this.conf = conf;
  this.walGroupId = walGroupId;
  this.queue = queue;
  this.source = source;
  // Retry pacing: base sleep of 1 second, scaled up to the multiplier (300 => ~5 minutes).
  this.sleepForRetries = conf.getLong("replication.source.sleepforretries", 1000);
  this.maxRetriesMultiplier = conf.getInt("replication.source.maxretriesmultiplier", 300);
  // Upper bound on a single getEntries call (default DEFAULT_TIMEOUT, i.e. 20 seconds).
  this.getEntriesTimeout = conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT);
}
public ThriftConnection(Configuration conf, ExecutorService pool, final User user) throws IOException { this.conf = conf; this.user = user; this.host = conf.get(Constants.HBASE_THRIFT_SERVER_NAME); this.port = conf.getInt(Constants.HBASE_THRIFT_SERVER_PORT, -1); Preconditions.checkArgument(port > 0); Preconditions.checkArgument(host != null); this.isFramed = conf.getBoolean(Constants.FRAMED_CONF_KEY, Constants.FRAMED_CONF_DEFAULT); this.isCompact = conf.getBoolean(Constants.COMPACT_CONF_KEY, Constants.COMPACT_CONF_DEFAULT); this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.connectTimeout = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT); String className = conf.get(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS, DefaultThriftClientBuilder.class.getName()); try { Class<?> clazz = Class.forName(className); Constructor<?> constructor = clazz .getDeclaredConstructor(ThriftConnection.class); constructor.setAccessible(true); clientBuilder = (ThriftClientBuilder) constructor.newInstance(this); }catch (Exception e) { throw new IOException(e); } }
/**
 * Opens a {@link RecoverableZooKeeper} against the given ensemble, with the session timeout
 * and retry tuning taken from the configuration.
 */
public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher,
    final String identifier) throws IOException {
  if (ensemble == null) {
    throw new IOException("Unable to determine ZooKeeper ensemble");
  }
  int sessionTimeout =
      conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
  if (LOG.isTraceEnabled()) {
    LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble);
  }
  // Retry tuning handed to the recoverable wrapper.
  int maxRetries = conf.getInt("zookeeper.recovery.retry", 3);
  int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
  int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000);
  // Static side effect kept from the original: timeout used when dumping connection info.
  zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", 1000);
  return new RecoverableZooKeeper(ensemble, sessionTimeout, watcher, maxRetries,
      retryIntervalMillis, maxSleepTime, identifier);
}
public ExponentialCompactionWindowFactory(CompactionConfiguration comConf) { Configuration conf = comConf.conf; baseWindowMillis = conf.getLong(BASE_WINDOW_MILLIS_KEY, 3600000 * 6); windowsPerTier = conf.getInt(WINDOWS_PER_TIER_KEY, 4); maxTierAgeMillis = conf.getLong(MAX_TIER_AGE_MILLIS_KEY, comConf.getDateTieredMaxStoreFileAgeMillis()); LOG.info(toString()); }
/**
 * Chore that periodically runs the configured health-check script; a node is considered
 * unhealthy once {@code threshold} failures occur within the failure window.
 */
public HealthCheckChore(int sleepTime, Stoppable stopper, Configuration conf) {
  super("HealthChecker", stopper, sleepTime);
  LOG.info("Health Check Chore runs every " + StringUtils.formatTime(sleepTime));
  this.config = conf;
  String scriptLocation = this.config.get(HConstants.HEALTH_SCRIPT_LOC);
  long scriptTimeout = this.config.getLong(HConstants.HEALTH_SCRIPT_TIMEOUT,
      HConstants.DEFAULT_HEALTH_SCRIPT_TIMEOUT);
  healthChecker = new HealthChecker();
  healthChecker.init(scriptLocation, scriptTimeout);
  this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD,
      HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD);
  // Widen to long before multiplying so threshold * sleepTime cannot overflow int.
  this.failureWindow = (long) this.threshold * (long) sleepTime;
}
/**
 * Coprocessor startup: lazily creates (or reuses) the region-shared ZK data holder and
 * acquires a cache reference from it.
 *
 * @throws IOException wrapping any failure while creating or acquiring the holder
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
  try {
    // The holder is shared across all instances in this region's environment; only the first
    // caller constructs it.
    ZKDataHolder holder = (ZKDataHolder) regionEnv.getSharedData().computeIfAbsent(ZKKEY, key -> {
      Configuration c = regionEnv.getConfiguration();
      return new ZKDataHolder(c.get(ZK_ENSEMBLE_KEY),
          c.getInt(ZK_SESSION_TIMEOUT_KEY, ZK_SESSION_TIMEOUT_DEFAULT));
    });
    this.cache = holder.acquire();
  } catch (Exception e) {
    throw new IOException(e);
  }
}
/**
 * Reads the thrift-server threading and queueing knobs from the configuration, applies them
 * to this builder, and logs the effective values.
 */
private void readConf(Configuration conf) {
  final int selectorThreads = conf.getInt(SELECTOR_THREADS_CONF_KEY, getSelectorThreads());
  final int workerThreads = conf.getInt(WORKER_THREADS_CONF_KEY, getWorkerThreads());
  final int stopTimeoutVal = conf.getInt(STOP_TIMEOUT_CONF_KEY, getStopTimeoutVal());
  final int acceptQueueSizePerThread =
      conf.getInt(ACCEPT_QUEUE_SIZE_PER_THREAD_CONF_KEY, getAcceptQueueSizePerThread());
  // Policy names may be configured in any case; normalize with a fixed locale before valueOf.
  final AcceptPolicy acceptPolicy = AcceptPolicy.valueOf(
      conf.get(ACCEPT_POLICY_CONF_KEY, getAcceptPolicy().toString()).toUpperCase(Locale.ROOT));
  super.selectorThreads(selectorThreads)
      .workerThreads(workerThreads)
      .stopTimeoutVal(stopTimeoutVal)
      .acceptQueueSizePerThread(acceptQueueSizePerThread)
      .acceptPolicy(acceptPolicy);
  LOG.info("Read configuration selectorThreads:" + selectorThreads
      + " workerThreads:" + workerThreads
      + " stopTimeoutVal:" + stopTimeoutVal + "sec"
      + " acceptQueueSizePerThread:" + acceptQueueSizePerThread
      + " acceptPolicy:" + acceptPolicy);
}
}
.map(RegionInfo::getRegionName)
    .findFirst()
    .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
// HBASE_CLIENT_PAUSE is expressed in milliseconds, so the RetryCounter must use the matching
// unit: the previous TimeUnit.MICROSECONDS made every retry pause 1000x shorter than intended.
long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
    HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
/**
 * Captures the table to scan and reads scanner-logging settings from its configuration.
 *
 * @param htable the {@link org.apache.hadoop.hbase.client.Table} to scan
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  // When enabled, scanner activity is logged; logPerRowCount controls the reporting interval.
  logScannerActivity = conf.getBoolean(
    ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
/**
 * Test connection that fabricates a meta table spanning many servers and regions, sized from
 * the "hbase.test.*" configuration keys.
 */
ManyServersManyRegionsConnection(Configuration conf, ExecutorService pool, User user)
    throws IOException {
  super(conf, pool, user);
  int serverCount = conf.getInt("hbase.test.servers", 10);
  this.serversByClient = new HashMap<>(serverCount);
  // Table name falls back to BIG_USER_TABLE when not configured.
  byte[] tableName =
      Bytes.toBytes(conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE)));
  this.meta = makeMeta(tableName,
      conf.getInt("hbase.test.regions", 100),
      conf.getLong("hbase.test.namespace.span", 1000),
      serverCount);
  this.conf = conf;
}
/**
 * Verifies that the configuration meets the requirements for security features: the
 * AccessController coprocessor present in the region, master and regionserver coprocessor
 * lists, HFile format version supporting tags, and authorization enabled.
 *
 * @throws RuntimeException if any requirement is not met
 */
public static void verifyConfiguration(Configuration conf) {
  // Default missing keys to "" so an unset coprocessor list yields the descriptive
  // RuntimeException below instead of an NPE on split()/contains().
  String coprocs = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  boolean accessControllerLoaded = false;
  for (String coproc : coprocs.split(",")) {
    try {
      accessControllerLoaded = AccessController.class.isAssignableFrom(Class.forName(coproc));
      if (accessControllerLoaded) {
        break;
      }
    } catch (ClassNotFoundException ignored) {
      // A coprocessor class we cannot load is simply not the AccessController; keep scanning.
    }
  }
  if (!(accessControllerLoaded
      && conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, "")
          .contains(AccessController.class.getName())
      && conf.get(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, "")
          .contains(AccessController.class.getName()))) {
    throw new RuntimeException("AccessController is missing from a system coprocessor list");
  }
  if (conf.getInt(HFile.FORMAT_VERSION_KEY, 2) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("Post 0.96 security features require HFile version >= 3");
  }
  if (!conf.getBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, false)) {
    throw new RuntimeException("Post 2.0.0 security features require set "
        + User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY + " to true");
  }
}
/**
 * Retrieves the job history server (JHS) bind address from the configuration.
 *
 * @param conf the configuration to read the history address and port from
 * @return the socket address to bind to, built from the configured history address and port,
 *         falling back to the framework defaults when unset
 */
public static InetSocketAddress getBindAddress(Configuration conf) {
  return conf.getSocketAddr(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
      conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
      conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_PORT,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_PORT));
}
/**
 * Builds a retrying-caller factory, reading pause, retry-count, back-pressure and RPC timeout
 * settings from the configuration. The CQTBE pause is clamped to at least the regular pause.
 */
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  long cqtbePause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
  if (cqtbePause >= pause) {
    this.pauseForCQTBE = cqtbePause;
  } else {
    // A CQTBE pause shorter than the regular pause would retry an overloaded server even
    // faster; warn and fall back to the regular pause.
    LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: " + cqtbePause
        + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE + ", will use " + pause
        + " instead.");
    this.pauseForCQTBE = pause;
  }
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
  rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
}
/**
 * Builds a canary monitor over table regions; write-sniffing and canary tuning parameters are
 * read from the connection's configuration.
 */
public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp,
    Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName,
    boolean treatFailureAsError, HashMap<String, Long> configuredReadTableTimeouts,
    long configuredWriteTableTimeout, long allowedFailures) {
  super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError,
      allowedFailures);
  Configuration conf = connection.getConfiguration();
  this.writeSniffing = writeSniffing;
  this.writeTableName = writeTableName;
  // Write-path canary tuning, all overridable through configuration.
  this.writeDataTTL =
      conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL);
  this.regionsLowerLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f);
  this.regionsUpperLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f);
  this.checkPeriod = conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY,
      DEFAULT_WRITE_TABLE_CHECK_PERIOD);
  this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false);
  // Defensive copy so callers retain ownership of their map.
  this.configuredReadTableTimeouts = new HashMap<>(configuredReadTableTimeouts);
  this.configuredWriteTableTimeout = configuredWriteTableTimeout;
}
// NOTE(review): this span is the interior of a retry loop whose enclosing braces, catch/close
// handling, and the declarations of `conf` and `keeperEx` are outside this excerpt; `break`
// exits that outer loop. Code kept byte-for-byte — only comments added.
LOG.info("Waiting until the base znode is available");
// Parent znode under which the cluster keeps its state.
String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
    HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf),
    conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT),
    EmptyWatcher.instance);
try {
  if (zk.exists(parentZNode, false) != null) {
    LOG.info("Parent znode exists: " + parentZNode);
    keeperEx = null;
    break;
// NOTE(review): lines between the `break` above and the `throw` below appear to be missing
// from this excerpt (braces are unbalanced here).
throw new IOException(keeperEx);
/**
 * Get integer parameter.
 *
 * @param cfg Configuration.
 * @param name Parameter name (format string; the authority is substituted into it).
 * @param authority Authority, or {@code null} to substitute an empty string.
 * @param dflt Default value used when the parameter is absent.
 * @return Integer value.
 * @throws IOException In case of parse exception.
 */
public static int parameter(Configuration cfg, String name, String authority, int dflt)
    throws IOException {
  String name0 = String.format(name, authority != null ? authority : "");
  try {
    return cfg.getInt(name0, dflt);
  } catch (NumberFormatException e) {
    // Preserve the parse failure as the cause instead of discarding it.
    throw new IOException("Failed to parse parameter value to integer: " + name0, e);
  }
}
/**
 * Sizes the test from its configuration and brings up the integration cluster.
 */
@Override
@Before
public void setUp() throws Exception {
  util = new IntegrationTestingUtility();
  Configuration conf = util.getConfiguration();
  // All test dimensions can be overridden through configuration keys.
  regionsCountPerServer = conf.getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT);
  regionServerCount = conf.getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT);
  rowsInIteration = conf.getInt(ROWS_PER_ITERATION_KEY, DEFAULT_ROWS_IN_ITERATION);
  numIterations = conf.getInt(NUM_ITERATIONS_KEY, DEFAULT_NUM_ITERATIONS);
  numTables = conf.getInt(NUMBER_OF_TABLES_KEY, DEFAULT_NUMBER_OF_TABLES);
  sleepTime = conf.getLong(SLEEP_TIME_KEY, SLEEP_TIME_DEFAULT);
  enableBackup(conf);
  LOG.info("Initializing cluster with {} region servers.", regionServerCount);
  util.initializeCluster(regionServerCount);
  LOG.info("Cluster initialized and ready");
}