/**
 * Creates a batch reader bound to one table.
 *
 * @param context connection context (non-null)
 * @param tableId id of the table to scan (non-null)
 * @param authorizations scan authorizations (non-null)
 * @param numQueryThreads number of threads in the per-reader query pool
 */
public TabletServerBatchReader(ClientContext context, Table.ID tableId, Authorizations authorizations, int numQueryThreads) {
  // Fail fast on missing arguments, in the same order as before.
  checkArgument(context != null, "context is null");
  checkArgument(tableId != null, "tableId is null");
  checkArgument(authorizations != null, "authorizations is null");

  this.context = context;
  this.tableId = tableId;
  this.authorizations = authorizations;
  this.numThreads = numQueryThreads;

  // One dedicated pool per reader; the instance counter keeps thread names unique.
  queryThreadPool = new SimpleThreadPool(numQueryThreads, "batch scanner " + batchReaderInstance + "-");

  ranges = null;
  // NOTE(review): presumably captures the creation stack trace so an unclosed
  // reader can be reported later — confirm against the close/finalize path.
  ex = new Throwable();
}
/**
 * Creates the writer's internal state and its two thread pools.
 *
 * @param numSendThreads size of the pool that sends binned mutations to servers
 */
public MutationWriter(int numSendThreads) {
  // Per-server mutation bins, the set of servers queued for sending, and locator cache.
  serversMutations = new HashMap<>();
  queued = new HashSet<>();
  locators = new HashMap<>();

  sendThreadPool = new SimpleThreadPool(numSendThreads, this.getClass().getName());

  // Single binning thread. The SynchronousQueue holds no tasks, so with
  // CallerRunsPolicy a submission while binning is busy runs in the caller's thread.
  binningThreadPool = new SimpleThreadPool(1, "BinMutations", new SynchronousQueue<>());
  binningThreadPool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
 * Creates a log sorter for write-ahead-log recovery.
 *
 * @param context client context used for server communication
 * @param fs volume manager for reading/writing sorted logs
 * @param conf configuration supplying pool size and WAL block size
 */
public LogSorter(ClientContext context, VolumeManager fs, AccumuloConfiguration conf) {
  this.context = context;
  this.fs = fs;
  this.conf = conf;
  // Pool is capped by the configured maximum number of concurrent recoveries.
  final int recoveryThreads = conf.getCount(Property.TSERV_RECOVERY_MAX_CONCURRENT);
  this.threadPool = new SimpleThreadPool(recoveryThreads, getClass().getName());
  this.walBlockSize = DfsLogger.getWalBlockSize(conf);
}
return; nextLogMaker = new SimpleThreadPool(1, "WALog creator"); nextLogMaker.submit(new LoggingRunnable(log, new Runnable() { @Override
/**
 * Lazily creates and returns the shared bulk-import executor.
 *
 * <p>{@code synchronized} guards the check-then-create on the static field so
 * only one pool is ever built.
 *
 * @param master source of the pool-size configuration
 * @return the shared, trace-wrapped executor
 */
private static synchronized ExecutorService getThreadPool(Master master) {
  if (threadPool == null) {
    final int poolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
    SimpleThreadPool pool = new SimpleThreadPool(poolSize, "bulk import");
    // Let idle core threads time out between bulk imports.
    pool.allowCoreThreadTimeOut(true);
    threadPool = new TraceExecutorService(pool);
  }
  return threadPool;
}
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move"); List<Future<Exception>> results = new ArrayList<>();
final ThreadPoolExecutor pool = new SimpleThreadPool(executorThreads, "ClientPool");
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulkDir move"); List<Future<Boolean>> results = new ArrayList<>();
/**
 * Starts the replication service and the worker pool that performs outgoing
 * replication, then schedules a periodic task that resizes the pool when the
 * configured worker-thread count changes.
 *
 * @param aconf configuration consulted by the periodic resizer
 * @throws RuntimeException if the thrift replication service cannot be started
 */
private void setupReplication(AccumuloConfiguration aconf) {
  // Start the thrift service listening for incoming replication requests
  try {
    startReplicationService();
  } catch (UnknownHostException e) {
    throw new RuntimeException("Failed to start replication service", e);
  }
  // Start the pool to handle outgoing replications
  final ThreadPoolExecutor replicationThreadPool = new SimpleThreadPool(
      getConfiguration().getCount(Property.REPLICATION_WORKER_THREADS), "replication task");
  replWorker.setExecutor(replicationThreadPool);
  // NOTE(review): run() is invoked directly on this thread, not submitted to an
  // executor — presumably it just registers/initializes and returns quickly;
  // confirm it does not block startup.
  replWorker.run();
  // Check the configuration value for the size of the pool and, if changed, resize the pool
  Runnable replicationWorkThreadPoolResizer = () -> {
    int maxPoolSize = aconf.getCount(Property.REPLICATION_WORKER_THREADS);
    if (replicationThreadPool.getMaximumPoolSize() != maxPoolSize) {
      log.info("Resizing thread pool for sending replication work from {} to {}",
          replicationThreadPool.getMaximumPoolSize(), maxPoolSize);
      replicationThreadPool.setMaximumPoolSize(maxPoolSize);
    }
  };
  // Initial delay 10s, then every 30s.
  SimpleTimer.getInstance(aconf).schedule(replicationWorkThreadPoolResizer, 10000, 30000);
}
log.info("{} offline", tableName); SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker"); log.info("Rewriting entries for {}", tableName); Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool( getConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");
/**
 * Creates a batch reader for the given table.
 *
 * @param context connection context (non-null)
 * @param tableId table id string (non-null)
 * @param authorizations scan authorizations (non-null)
 * @param numQueryThreads size of the query thread pool
 */
public TabletServerBatchReader(ClientContext context, String tableId, Authorizations authorizations, int numQueryThreads) {
  // Validate all required arguments before touching any state.
  checkArgument(context != null, "context is null");
  checkArgument(tableId != null, "tableId is null");
  checkArgument(authorizations != null, "authorizations is null");

  this.numThreads = numQueryThreads;
  this.authorizations = authorizations;
  this.tableId = tableId;
  this.context = context;

  // Thread names carry the per-reader instance counter for easier debugging.
  queryThreadPool = new SimpleThreadPool(numQueryThreads, "batch scanner " + batchReaderInstance + "-");

  ranges = null;
  // NOTE(review): presumably records where this reader was created so a leak
  // (reader never closed) can be reported with a useful stack — confirm.
  ex = new Throwable();
}
/**
 * Creates the writer's internal collections and its two thread pools.
 *
 * @param numSendThreads size of the pool used to send binned mutations
 */
public MutationWriter(int numSendThreads) {
  serversMutations = new HashMap<>();
  queued = new HashSet<>();
  sendThreadPool = new SimpleThreadPool(numSendThreads, this.getClass().getName());
  locators = new HashMap<>();
  // Diamond operator instead of the redundant <Runnable> type argument, matching
  // the style used everywhere else in this codebase. A SynchronousQueue queues
  // nothing, so CallerRunsPolicy makes an over-subscribed bin run in the caller.
  binningThreadPool = new SimpleThreadPool(1, "BinMutations", new SynchronousQueue<>());
  binningThreadPool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
 * Creates a log sorter.
 *
 * @param instance the Accumulo instance
 * @param fs filesystem used for log I/O
 * @param conf configuration providing the recovery concurrency limit
 */
public LogSorter(Instance instance, FileSystem fs, AccumuloConfiguration conf) {
  this.instance = instance;
  this.fs = fs;
  this.conf = conf;
  // Bounded by the configured maximum concurrent WAL recoveries.
  final int maxConcurrent = conf.getCount(Property.TSERV_RECOVERY_MAX_CONCURRENT);
  this.threadPool = new SimpleThreadPool(maxConcurrent, getClass().getName());
}
/**
 * Creates a log sorter.
 *
 * @param instance the Accumulo instance
 * @param fs volume manager used for log I/O
 * @param conf configuration providing the recovery concurrency limit
 */
public LogSorter(Instance instance, VolumeManager fs, AccumuloConfiguration conf) {
  this.instance = instance;
  this.fs = fs;
  this.conf = conf;
  // Pool size follows the configured cap on simultaneous recoveries.
  final int recoveryLimit = conf.getCount(Property.TSERV_RECOVERY_MAX_CONCURRENT);
  this.threadPool = new SimpleThreadPool(recoveryLimit, getClass().getName());
}
/**
 * Returns the shared bulk-import executor, creating it on first use.
 *
 * <p>The method is {@code synchronized} so the lazy initialization of the
 * static field is race-free.
 *
 * @param master source of the system configuration holding the pool size
 * @return the shared, trace-wrapped executor
 */
private static synchronized ExecutorService getThreadPool(Master master) {
  if (threadPool == null) {
    final int size = master.getSystemConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
    ThreadPoolExecutor pool = new SimpleThreadPool(size, "bulk import");
    // Allow core threads to expire when no bulk imports are running.
    pool.allowCoreThreadTimeOut(true);
    threadPool = new TraceExecutorService(pool);
  }
  return threadPool;
}
/**
 * Lazily builds and returns the singleton bulk-import executor.
 *
 * <p>Synchronization makes the null-check-then-assign on the static field safe
 * across concurrent callers.
 *
 * @param master provides the configured bulk-import pool size
 * @return the shared, trace-wrapped executor
 */
private static synchronized ExecutorService getThreadPool(Master master) {
  if (threadPool == null) {
    ThreadPoolExecutor executor = new SimpleThreadPool(
        master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE), "bulk import");
    // Idle core threads may terminate between imports.
    executor.allowCoreThreadTimeOut(true);
    threadPool = new TraceExecutorService(executor);
  }
  return threadPool;
}
SimpleThreadPool pool = new SimpleThreadPool(1, "waitForBalance"); Future<Boolean> wait = pool.submit(new Callable<Boolean>() { @Override
@Override public void visit(State state, Environment env, Properties props) throws Exception { Random rand = new Random(); String hostname = InetAddress.getLocalHost().getHostName().replaceAll("[-.]", "_"); String pid = env.getPid(); tableName = String.format("bulk_%s_%s_%d", hostname, pid, System.currentTimeMillis()); log.info("Starting bulk test on " + tableName); TableOperations tableOps = env.getConnector().tableOperations(); try { if (!tableOps.exists(getTableName())) { tableOps.create(getTableName()); IteratorSetting is = new IteratorSetting(10, SummingCombiner.class); SummingCombiner.setEncodingType(is, LongCombiner.Type.STRING); SummingCombiner.setCombineAllColumns(is, true); tableOps.attachIterator(getTableName(), is); } } catch (TableExistsException ex) { // expected if there are multiple walkers } state.set("rand", rand); state.set("fs", FileSystem.get(CachedConfiguration.getInstance())); state.set("bulkImportSuccess", "true"); BulkPlusOne.counter.set(0l); ThreadPoolExecutor e = new SimpleThreadPool(MAX_POOL_SIZE, "bulkImportPool"); state.set("pool", e); }
Runnable r3 = new QSRunnable(qsc, qs3); ExecutorService executorService = new SimpleThreadPool(10, "QSExecutor"); executorService.execute(r1); executorService.execute(r2);