@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  Threads.sleep(SLEEP_TIME);
}
private void createBatchPool(Configuration conf) {
  // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
  int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
  if (maxThreads == 0) {
    maxThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
  LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(maxThreads
      * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
          HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
  ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime,
      TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
  tpe.allowCoreThreadTimeOut(true);
  this.batchPool = tpe;
}
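
The pool above deliberately sets the core size equal to the maximum size and then enables core-thread timeout, so the pool never grows past maxThreads yet shrinks to zero threads when idle. A minimal, self-contained sketch of the same idiom using only java.util.concurrent (the class name and sizes here are illustrative, not HBase APIs):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
  static ThreadPoolExecutor newBoundedPool(int maxThreads, int queueCapacity) {
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
        maxThreads, maxThreads,            // core == max: the pool never exceeds maxThreads
        60L, TimeUnit.SECONDS,             // idle threads are reclaimed after 60 seconds...
        new LinkedBlockingQueue<>(queueCapacity));
    tpe.allowCoreThreadTimeOut(true);      // ...including core threads, so an idle pool empties out
    return tpe;
  }

  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor pool = newBoundedPool(4, 100);
    pool.submit(() -> System.out.println("ran on " + Thread.currentThread().getName()));
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
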
@Override
public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan)
    throws IOException {
  if (SLEEP_MS > 0) {
    Threads.sleepWithoutInterrupt(SLEEP_MS);
  }
}
}
public static ThreadPoolExecutor createExecutor(final Configuration conf, final String name) {
  int maxThreads = conf.getInt("hbase.snapshot.thread.pool.max", 8);
  return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
      Threads.getNamedThreadFactory(name));
}
public boolean start() {
  if (running.getAndSet(true)) {
    LOG.warn("Already running");
    return false;
  }
  LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, "
      + "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay);
  // Create the timeout executor
  timeoutExecutor = new TimeoutExecutorThread();
  timeoutExecutor.start();
  // Create the thread pool that will execute RPCs
  threadPool = Threads.getBoundedCachedThreadPool(corePoolSize, 60L, TimeUnit.SECONDS,
      Threads.newDaemonThreadFactory(this.getClass().getSimpleName(),
          getUncaughtExceptionHandler()));
  return true;
}
@Test(timeout = 60000)
public void testMasterInitWithObserverModeClientZKQuorum() throws Exception {
  Configuration conf = new Configuration(TESTUTIL.getConfiguration());
  Assert.assertFalse(Boolean.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE));
  // Set client ZK to a non-existing address and make sure the server won't access client ZK
  // (server start should not be affected).
  conf.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
  conf.setInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT,
      TESTUTIL.getZkCluster().getClientPort() + 1);
  // Settings that let us avoid starting additional region servers.
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
  conf.setBoolean(LoadBalancer.TABLES_ON_MASTER, true);
  // The main setting for this test case.
  conf.setBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE, true);
  HMaster master = new HMaster(conf);
  master.start();
  while (!master.isInitialized()) {
    Threads.sleep(200);
  }
  Assert.assertNull(master.metaLocationSyncer);
  Assert.assertNull(master.masterAddressSyncer);
  master.stopMaster();
  master.join();
}
}
getConf().set("hbase.client.connection.impl", ManyServersManyRegionsConnection.class.getName()); getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName()); getConf().setInt("hbase.client.start.log.errors.counter", 0); getConf().setLong("hbase.test.namespace.span", namespaceSpan); getConf().setLong("hbase.test.servers", servers); getConf().set("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE)); getConf().setLong("hbase.test.multi.pause.when.done", multiPause); final ExecutorService pool = Executors.newCachedThreadPool(Threads.getNamedThreadFactory("p"));
@Test
public void testTableCreation() throws Exception {
  conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, StochasticLoadBalancer.class.getName());
  UTIL.startMiniCluster(SLAVES);
  while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
    Threads.sleep(1);
  }
  Admin admin = UTIL.getAdmin();
  admin.setBalancerRunning(false, true);
  UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      FavoredStochasticBalancer.class.getName());
  UTIL.restartHBaseCluster(SLAVES);
  while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
    Threads.sleep(1);
  }
  admin = UTIL.getAdmin();
  UTIL.waitTableAvailable(desc.getTableName());
  FavoredNodesManager fnm = UTIL.getHBaseCluster().getMaster().getFavoredNodesManager();
  List<HRegionInfo> regionsOfTable = admin.getTableRegions(TableName.valueOf(tableName));
  for (HRegionInfo rInfo : regionsOfTable) {
    Set<ServerName> favNodes = Sets.newHashSet(fnm.getFavoredNodes(rInfo));
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MasterSyncCoprocessor.class.getName());
  conf.setInt("hbase.regionserver.handler.count", 2);
  conf.setInt("hbase.regionserver.metahandler.count", 30);
  conf.setInt("hbase.htable.threads.max", POOL_SIZE);
  conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
  conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
  conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
  TEST_UTIL.startMiniCluster(1);
  tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
      new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck"));
  hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE);
  AssignmentManager assignmentManager =
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
  regionStates = assignmentManager.getRegionStates();
  connection = (ClusterConnection) TEST_UTIL.getConnection();
  admin = connection.getAdmin();
  admin.setBalancerRunning(false, true);
  TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
}
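
The tableExecutorService above pairs one core thread and a larger maximum with a SynchronousQueue, which makes it a direct-handoff pool: nothing is ever queued, so each submission either reuses an idle thread, starts a new one up to the maximum, or is rejected. A minimal sketch of that behavior in plain java.util.concurrent (class name and sizes illustrative):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class HandoffPoolSketch {
  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 8,                      // one core thread, at most eight threads in total
        60, TimeUnit.SECONDS,      // surplus threads die after 60 seconds idle
        new SynchronousQueue<>()); // no queueing: tasks hand off directly to a thread
    for (int i = 0; i < 8; i++) {
      final int n = i;
      pool.submit(() -> System.out.println("task " + n + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}

A ninth concurrent submission against this configuration would throw RejectedExecutionException rather than queue up, which is why direct-handoff pools suit work that should fail fast when the pool is saturated.
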
@Before
public void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  // Register the token type for the authentication protocol.
  SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
      new SecurityInfo("hbase.test.kerberos.principal",
          AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);
  conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl);
  server = new TokenServer(conf, TEST_UTIL);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread, "TokenServer:" + server.getServerName().toString());
@Test
public void testFlushedSequenceIdPersistLoad() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  int msgInterval = conf.getInt("hbase.regionserver.msginterval", 100);
  // Insert some data into META.
  TableName tableName = TableName.valueOf("testFlushSeqId");
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
  Table table = TEST_UTIL.createTable(desc, null);
  // Flush the META region.
  TEST_UTIL.flush(TableName.META_TABLE_NAME);
  // Wait for the regionserver report.
  Threads.sleep(msgInterval * 2);
  // Record the flushed seqids before cluster shutdown.
  Map<byte[], Long> regionMapBefore = TEST_UTIL.getHBaseCluster().getMaster()
      .getServerManager().getFlushedSequenceIdByRegion();
  // Restart the cluster, which persists and reloads the flushed sequence ids.
  TEST_UTIL.getMiniHBaseCluster().shutdown();
  TEST_UTIL.restartHBaseCluster(2);
  TEST_UTIL.waitUntilNoRegionsInTransition();
  // Check equality after reloading the flushed sequence id map.
  Map<byte[], Long> regionMapAfter = TEST_UTIL.getHBaseCluster().getMaster()
      .getServerManager().getFlushedSequenceIdByRegion();
  assertTrue(regionMapBefore.equals(regionMapAfter));
}
final int maxThreads = Math.min(
    conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
        conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
    nbFiles);
LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
    + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
    Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
final List<Future<Pair<Path, Path>>> futures = new ArrayList<>(nbFiles);
byte[] familyName = Bytes.toBytes(e.getKey());
final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
final Collection<StoreFileInfo> storeFiles = e.getValue();
long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout",
    conf.getLong("hbase.regionserver.fileSplitTimeout", 600000));
try {
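
The splitter above fans work out to a fixed pool with named threads, collects a Future per store file, and bounds the wait with fileSplitTimeout. The same shape with standard APIs only (SplitterPoolSketch and the squaring task are illustrative stand-ins, not the HBase code):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class SplitterPoolSketch {
  public static void main(String[] args) throws Exception {
    final AtomicInteger id = new AtomicInteger();
    ExecutorService pool = Executors.newFixedThreadPool(4,
        r -> new Thread(r, "StoreFileSplitter-" + id.getAndIncrement()));
    List<Future<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      final int n = i;
      futures.add(pool.submit(() -> n * n)); // stand-in for splitting one store file
    }
    long timeoutMs = 600000; // plays the role of hbase.master.fileSplitTimeout
    for (Future<Integer> f : futures) {
      System.out.println(f.get(timeoutMs, TimeUnit.MILLISECONDS)); // bounded wait per result
    }
    pool.shutdown();
  }
}
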
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  GROUP = new NioEventLoopGroup(1, Threads.newDaemonThreadFactory("TestAsyncWALReplay"));
  CHANNEL_CLASS = NioSocketChannel.class;
  Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration();
  conf.set(WALFactory.WAL_PROVIDER, "asyncfs");
  AbstractTestWALReplay.setUpBeforeClass();
}
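
Netty's NioEventLoopGroup takes a thread count and a ThreadFactory, so the daemon factory above controls how the single event-loop thread is created. A tiny sketch of the same constructor with an inline factory (requires Netty 4.x on the classpath; the thread name is illustrative):

import io.netty.channel.nio.NioEventLoopGroup;

public class EventLoopSketch {
  public static void main(String[] args) throws Exception {
    // One event-loop thread, created as a named daemon thread.
    NioEventLoopGroup group = new NioEventLoopGroup(1, r -> {
      Thread t = new Thread(r, "EventLoopSketch");
      t.setDaemon(true);
      return t;
    });
    group.submit(() -> System.out.println("running on " + Thread.currentThread().getName())).sync();
    group.shutdownGracefully().sync();
  }
}
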
@Test
public void testSplitTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys);
  TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
  final int numberOfRegions = admin.getTableRegions(t.getName()).size();
  byte[] splitPoint = Bytes.toBytes(0);
  RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
  HRegionInfo parent = locator.getRegionLocation(splitPoint).getRegionInfo();
  List<ServerName> parentFN = fnm.getFavoredNodes(parent);
  admin.split(tableName, splitPoint);
  TEST_UTIL.waitUntilNoRegionsInTransition(WAIT_TIMEOUT);
  HRegionInfo daughter1 = locator.getRegionLocation(parent.getStartKey(), true).getRegionInfo();
  List<ServerName> daughter1FN = fnm.getFavoredNodes(daughter1);
  TEST_UTIL.getMiniHBaseCluster().compact(tableName, true);
  admin.runCatalogScan();
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor());
  Threads.sleep(2000);
  LOG.info("STARTING DELETE");
  TEST_UTIL.deleteTable(tableName);
    final boolean failIfWALExists, final String prefix, final String suffix) throws IOException {
  super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix);
  this.minTolerableReplication = conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication",
      FSUtils.getDefaultReplication(fs, this.walDir));
  this.lowReplicationRollLimit =
      conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
  this.closeErrorsTolerated = conf.getInt("hbase.regionserver.logroll.errors.tolerated", 2);
  this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC);
  // The single consumer thread of the ring buffer is named after the hosting thread.
  String hostingThreadName = Thread.currentThread().getName();
  this.disruptor = new Disruptor<>(RingBufferTruck::new,
      getPreallocatedEventCount(), Threads.getNamedThreadFactory(hostingThreadName + ".append"),
      ProducerType.MULTI, new BlockingWaitStrategy());
@BeforeClass
public static void startCluster() throws Exception {
  metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  TEST_UTIL = new HBaseTestingUtility();
  TABLES_ON_MASTER = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
  conf = TEST_UTIL.getConfiguration();
  conf.setLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster.
  conf.setInt("zookeeper.recovery.retry", 0);
  // testMobMetrics creates few hfiles and manages compaction manually.
  conf.setInt("hbase.hstore.compactionThreshold", 100);
  conf.setInt("hbase.hstore.compaction.max", 100);
  conf.setInt("hbase.regionserver.periodicmemstoreflusher.rangeofdelayseconds", 4 * 60);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  TEST_UTIL.startMiniCluster();
  cluster = TEST_UTIL.getHBaseCluster();
  cluster.waitForActiveAndReadyMaster();
  admin = TEST_UTIL.getAdmin();
  connection = TEST_UTIL.getConnection();
  // Wait until at least one region server is up before sampling its metrics.
  while (cluster.getLiveRegionServerThreads().isEmpty() || cluster.getRegionServer(0) == null) {
    Threads.sleep(100);
  }
  rs = cluster.getRegionServer(0);
  metricsRegionServer = rs.getRegionServerMetrics();
  serverSource = metricsRegionServer.getMetricsSource();
}
@BeforeClass
public static void startCluster() throws Exception {
  metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  conf.setLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster.
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  TEST_UTIL.startMiniCluster(2);
  cluster = TEST_UTIL.getHBaseCluster();
  cluster.waitForActiveAndReadyMaster();
  while (cluster.getLiveRegionServerThreads().size() < 2) {
    Threads.sleep(100);
  }
}
@Test
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + "." + name.getMethodName());
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  Table table = util.createTable(tableName, new byte[][] { A, B, C });
  try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
    util.getAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
      Thread.sleep(100);
    }
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    put.addColumn(B, B, B);
    put.addColumn(C, C, C);
    table.put(put);
    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);
  }
  verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "getCtPreWALRestore", "getCtPostWALRestore", }, tableName,
      new Integer[] { 0, 0 });
  util.deleteTable(tableName);
  table.close();
}
TaskMonitor(Configuration conf) {
  maxTasks = conf.getInt(MAX_TASKS_KEY, DEFAULT_MAX_TASKS);
  expirationTime = conf.getLong(EXPIRATION_TIME_KEY, DEFAULT_EXPIRATION_TIME);
  rpcWarnTime = conf.getLong(RPC_WARN_TIME_KEY, DEFAULT_RPC_WARN_TIME);
  tasks = new CircularFifoQueue(maxTasks);
  rpcTasks = Lists.newArrayList();
  monitorInterval = conf.getLong(MONITOR_INTERVAL_KEY, DEFAULT_MONITOR_INTERVAL);
  monitorThread = new Thread(new MonitorRunnable());
  Threads.setDaemonThreadRunning(monitorThread, "Monitor thread for TaskMonitor");
}
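
Threads.setDaemonThreadRunning(thread, name) names the given thread, marks it as a daemon, and starts it, which is why the monitor above never blocks JVM exit. A minimal equivalent in plain Java (class and thread names illustrative, not the HBase utility):

public class DaemonStartSketch {
  static Thread startDaemon(Runnable task, String name) {
    Thread t = new Thread(task, name);
    t.setDaemon(true); // daemon threads do not keep the JVM alive on their own
    t.start();
    return t;
  }

  public static void main(String[] args) throws Exception {
    Thread monitor = startDaemon(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          Thread.sleep(1000); // periodic monitoring work would go here
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }, "Monitor thread sketch");
    Thread.sleep(100);
    System.out.println(monitor.getName() + " daemon=" + monitor.isDaemon());
  }
}
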
@Override
public void run() {
  boolean b = this.conf.getBoolean(RUN_SHUTDOWN_HOOK, true);
  LOG.info("Shutdown hook starting; " + RUN_SHUTDOWN_HOOK + "=" + b + "; fsShutdownHook="
      + this.fsShutdownHook);
  if (b) {
    this.stop.stop("Shutdown hook");
    Threads.shutdown(this.threadToJoin);
    if (this.fsShutdownHook != null) {
      synchronized (fsShutdownHooks) {
        int refs = fsShutdownHooks.get(fsShutdownHook);
        if (refs == 1) {
          LOG.info("Starting fs shutdown hook thread.");
          Thread fsShutdownHookThread = (fsShutdownHook instanceof Thread)
              ? (Thread) fsShutdownHook
              : new Thread(fsShutdownHook,
                  fsShutdownHook.getClass().getSimpleName() + "-shutdown-hook");
          fsShutdownHookThread.start();
          Threads.shutdown(fsShutdownHookThread,
              this.conf.getLong(FS_SHUTDOWN_HOOK_WAIT, 30000));
        }
        if (refs > 0) {
          fsShutdownHooks.put(fsShutdownHook, refs - 1);
        }
      }
    }
  }
  LOG.info("Shutdown hook finished.");
}
}
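
The hook above reduces to: stop the service, join the worker thread, then run the filesystem hook and join it with a bounded wait (Threads.shutdown is essentially a join with logging). A minimal sketch of that shape with a plain JVM shutdown hook (names and timings illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class ShutdownHookSketch {
  public static void main(String[] args) throws Exception {
    final AtomicBoolean stop = new AtomicBoolean(false);
    Thread worker = new Thread(() -> {
      while (!stop.get()) {
        // ... service work would go here ...
      }
      System.out.println("worker stopped cleanly");
    }, "worker");
    worker.start();
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      stop.set(true); // signal the worker to stop
      try {
        worker.join(30000); // bounded wait, analogous to FS_SHUTDOWN_HOOK_WAIT above
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }, "shutdown-hook"));
    Thread.sleep(200);
    System.exit(0); // fires the hook, which stops and joins the worker
  }
}
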