// Start an executor for master server operations and grab its backing thread
// pool so the test can assert on pool growth directly.
ExecutorService executorService = new ExecutorService("unit_test");
executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS, maxThreads);
// FIX: the Executor returned by getExecutor() was discarded, leaving
// 'executor' undeclared on the next statement (compile error).
Executor executor = executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS);
ThreadPoolExecutor pool = executor.threadPoolExecutor;
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));
assertEquals(maxThreads, pool.getPoolSize());
executorService.shutdown();
// After shutdown no executors should remain registered.
assertEquals(0, executorService.getAllExecutorStatuses().size());
// Submitting after shutdown must not throw; the event is logged and dropped.
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));
/**
 * Starts an executor for the given type with {@code maxThreads} threads,
 * unless one with the derived name is already running on this server.
 */
public void startExecutorService(final ExecutorType type, final int maxThreads) {
  final String executorName = type.getExecutorName(this.servername);
  if (!isExecutorServiceRunning(executorName)) {
    startExecutorService(executorName, maxThreads);
  } else {
    LOG.debug("Executor service " + toString() + " already running on " + this.servername);
  }
}
// Verifies that a RuntimeException escaping EventHandler.process() causes the
// owning Server to be aborted (the executor's failure-propagation path).
@Test
public void testAborting() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server server = mock(Server.class);
  when(server.getConfiguration()).thenReturn(conf);
  ExecutorService executorService = new ExecutorService("unit_test");
  executorService.startExecutorService(
    ExecutorType.MASTER_SERVER_OPERATIONS, 1);
  // Handler that always fails; the expectation is that the failure is
  // translated into a Server.abort(...) call.
  executorService.submit(new EventHandler(server, EventType.M_SERVER_SHUTDOWN) {
    @Override
    public void process() throws IOException {
      throw new RuntimeException("Should cause abort");
    }
  });
  // Poll for up to 30s until Mockito has observed exactly one abort()
  // invocation; verify() throws until then, which we swallow and retry.
  Waiter.waitFor(conf, 30000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        verify(server, times(1)).abort(anyString(), (Throwable) anyObject());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  });
  executorService.shutdown();
}
/**
 * Queues the given remote procedure for asynchronous execution on this
 * server's executor service.
 */
public void executeProcedure(long procId, RSProcedureCallable callable) {
  final RSProcedureHandler handler = new RSProcedureHandler(this, procId, callable);
  executorService.submit(handler);
}
/**
 * Per-test setup: starts a mini ZooKeeper cluster, creates the base,
 * splitlog, and rs znodes the split-log worker expects, resets counters,
 * and starts a log-replay executor with 10 threads.
 */
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);
  // Start from a clean ZK namespace, then recreate the znodes this test needs.
  ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
  // checkExists returns -1 when the znode is absent.
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().rsZNode), not(is(-1)));
  SplitLogCounters.resetCounters();
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
}
// Create the executor service that runs this server's event handlers,
// named after this server, then bring up the web UI.
// NOTE(review): this chunk ends mid try/catch — the catch body is not visible here.
this.executorService = new ExecutorService(getName());
putUpWebUI();
} catch (Throwable t) {
/** Looks up the executor registered for {@code type} on this server. */
Executor getExecutor(final ExecutorType type) {
  final String executorName = type.getExecutorName(this.servername);
  return getExecutor(executorName);
}
/** Per-test teardown: stops the executor (if started) and the mini ZK cluster. */
@After
public void teardown() throws Exception {
  final ExecutorService svc = executorService;
  if (svc != null) {
    svc.shutdown();
  }
  TEST_UTIL.shutdownMiniZKCluster();
}
/**
 * Starts the master-side executor pools. Pool sizes come from configuration
 * with the defaults shown inline.
 * NOTE(review): this chunk is truncated — the method's closing brace is not
 * visible here.
 */
private void startServiceThreads() throws IOException{
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
    conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
    conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.meta.serverops.threads", 5));
  this.executorService.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
    conf.getInt("hbase.master.executor.logreplayops.threads", 10));
  // Table operations use a single thread — presumably to serialize them;
  // TODO confirm against the executor's documentation.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
  startProcedureExecutor();
public void submit(final EventHandler eh) { Executor executor = getExecutor(getExecutorServiceType(eh.getEventType())); if (executor == null) { // This happens only when events are submitted after shutdown() was // called, so dropping them should be "ok" since it means we're // shutting down. LOG.error("Cannot submit [" + eh + "] because the executor is missing." + " Is this process shutting down?"); } else { executor.submit(eh); } }
/**
 * Writes the status of every executor in the given service to {@code out};
 * prints a placeholder line when the service has not been initialized yet.
 */
protected void dumpExecutors(ExecutorService service, PrintWriter out) throws IOException {
  if (service == null) {
    out.println("ExecutorService is not initialized");
    return;
  }
  for (ExecutorStatus status : service.getAllExecutorStatuses().values()) {
    status.dumpTo(out, " ");
  }
}
}
// Hand this scanner its own seek handler so seeks run in parallel on the
// executor; the latch lets the caller wait for all handlers to finish.
// NOTE(review): this chunk ends at a dangling "} else {" — the alternative
// branch is not visible here.
ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch);
executor.submit(seekHandler);
handlers.add(seekHandler);
} else {
// Duplicate of the abort test: verifies that a RuntimeException escaping
// EventHandler.process() causes the owning Server to be aborted.
@Test
public void testAborting() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server server = mock(Server.class);
  when(server.getConfiguration()).thenReturn(conf);
  ExecutorService executorService = new ExecutorService("unit_test");
  executorService.startExecutorService(
    ExecutorType.MASTER_SERVER_OPERATIONS, 1);
  // Handler that always fails; the executor should turn this into abort().
  executorService.submit(new EventHandler(server, EventType.M_SERVER_SHUTDOWN) {
    @Override
    public void process() throws IOException {
      throw new RuntimeException("Should cause abort");
    }
  });
  // Poll for up to 30s until Mockito has observed exactly one abort()
  // invocation; verify() throws until then, which we swallow and retry.
  Waiter.waitFor(conf, 30000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        verify(server, times(1)).abort(anyString(), (Throwable) anyObject());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  });
  executorService.shutdown();
}
// Build a single-threaded executor named after the compacted-files-discharger
// event type and have the mocked region server hand it out.
final String name =
  org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER.toString();
final ExecutorService es = new ExecutorService(name);
es.startExecutorService(name + "-" + name, 1);
when(rss.getExecutorService()).thenReturn(es);
// Initialize the shared executor with default configuration.
// NOTE(review): no-arg constructor — presumably a different ExecutorService
// class than the named-constructor one used elsewhere; confirm the import.
service_ = new ExecutorService();
/** Exposes the backing thread pool for the given executor type (test hook). */
@VisibleForTesting
public ThreadPoolExecutor getExecutorThreadPool(final ExecutorType type) {
  final Executor executor = getExecutor(type);
  return executor.getThreadPoolExecutor();
}
// Wait for the compaction/split thread to finish, then stop the executor
// service if it was ever created.
// NOTE(review): this chunk ends at a dangling "{" — the body of the
// replication-handler branch is not visible here.
this.compactSplitThread.join();
if (this.executorService != null) this.executorService.shutdown();
// Same object serving as both replication source and sink handler.
if (this.replicationSourceHandler != null &&
  this.replicationSourceHandler == this.replicationSinkHandler) {
// Region open/close pools; meta variants are single threaded.
this.executorService.startExecutorService(ExecutorType.RS_OPEN_REGION,
  conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
this.executorService.startExecutorService(ExecutorType.RS_OPEN_META,
  conf.getInt("hbase.regionserver.executor.openmeta.threads", 1));
this.executorService.startExecutorService(ExecutorType.RS_OPEN_PRIORITY_REGION,
  conf.getInt("hbase.regionserver.executor.openpriorityregion.threads", 3));
this.executorService.startExecutorService(ExecutorType.RS_CLOSE_REGION,
  conf.getInt("hbase.regionserver.executor.closeregion.threads", 3));
this.executorService.startExecutorService(ExecutorType.RS_CLOSE_META,
  conf.getInt("hbase.regionserver.executor.closemeta.threads", 1));
// Parallel-seek pool only when the feature is enabled.
// NOTE(review): the closing braces of this if block (and of the
// region-replica one below) are not visible in this chunk, and the chunk
// ends mid-statement — do not reformat structurally without the full file.
if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
  this.executorService.startExecutorService(ExecutorType.RS_PARALLEL_SEEK,
    conf.getInt("hbase.storescanner.parallel.seek.threads", 10));
this.executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS,
  conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
this.executorService.startExecutorService(ExecutorType.RS_COMPACTED_FILES_DISCHARGER,
  conf.getInt(CompactionConfiguration.HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT, 10));
// Region-replica flusher pool defaults to the open-region pool size.
if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) {
  this.executorService.startExecutorService(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS,
    conf.getInt("hbase.regionserver.region.replica.flusher.threads",
      conf.getInt("hbase.regionserver.executor.openregion.threads", 3)));
// Replication-related pools.
this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
  conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
  conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1));
this.executorService.startExecutorService(ExecutorType.RS_SWITCH_RPC_THROTTLE,
/**
 * Writes the status of every executor in {@code service} to {@code out}.
 *
 * @param service executor service to dump; may be null before initialization
 * @param out destination writer
 */
protected void dumpExecutors(ExecutorService service, PrintWriter out) throws IOException {
  // Guard against being called before the executor service is set up;
  // matches the null-safe variant of this dump helper elsewhere, instead
  // of throwing NullPointerException.
  if (service == null) {
    out.println("ExecutorService is not initialized");
    return;
  }
  Map<String, ExecutorStatus> statuses = service.getAllExecutorStatuses();
  for (ExecutorStatus status : statuses.values()) {
    status.dumpTo(out, " ");
  }
}
}
// Start an executor for master server operations and grab its backing thread
// pool so the test can assert on pool growth directly.
ExecutorService executorService = new ExecutorService("unit_test");
executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS, maxThreads);
// FIX: the Executor returned by getExecutor() was discarded, leaving
// 'executor' undeclared on the next statement (compile error).
Executor executor = executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS);
ThreadPoolExecutor pool = executor.threadPoolExecutor;
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));
assertEquals(maxThreads, pool.getPoolSize());
executorService.shutdown();
// After shutdown no executors should remain registered.
assertEquals(0, executorService.getAllExecutorStatuses().size());
// Submitting after shutdown must not throw; the event is logged and dropped.
executorService.submit(
  new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter));