// Fragment (truncated): spins up a worker pool to move/rename files into the bulk import dir,
// then shuts the pool down and busy-waits (1s polls) until every queued move has finished.
// NOTE(review): as shown, the submitted lambda has a void body, so submit(Runnable) would yield
// Future<?>, which cannot be added to List<Future<Boolean>> — the full task presumably returns
// a Boolean; confirm against the complete source. awaitTermination also throws
// InterruptedException, which the surrounding (unseen) method must declare or handle.
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulkDir move"); List<Future<Boolean>> results = new ArrayList<>(); results.add(workers.submit(() -> { final Path originalPath = new Path(sourceDir, renameEntry.getKey()); Path newPath = new Path(bulkDir, renameEntry.getValue()); })); workers.shutdown(); while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}
/**
 * Creates a pool with a fixed bound of {@code max} threads, an unbounded work queue, and
 * threads named after {@code name}.
 *
 * <p>Core and maximum pool sizes are both {@code max}. Because
 * {@code allowCoreThreadTimeOut(true)} is set, even core threads expire after the 4-second
 * keep-alive, so an idle pool shrinks to zero threads.
 *
 * @param max maximum (and core) number of worker threads
 * @param name prefix handed to {@link NamingThreadFactory} for naming pool threads
 */
public SimpleThreadPool(int max, final String name) { super(max, max, 4L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name)); allowCoreThreadTimeOut(true); }
/**
 * Hands a set of mutations to the binning thread pool for asynchronous binning by tablet
 * server. A {@code null} set is a no-op. Any exception raised while binning is recorded via
 * {@link #updateUnknownErrors} rather than propagated, since the work runs on a pool thread.
 *
 * @param mutationsToSend mutations to bin; may be {@code null}
 */
void queueMutations(final MutationSet mutationsToSend) {
  if (mutationsToSend == null)
    return;
  // The early return above guarantees non-null, and the captured variable is final,
  // so no second null check is needed inside the task (the original re-checked redundantly).
  binningThreadPool.execute(Trace.wrap(() -> {
    try {
      log.trace("{} - binning {} mutations", Thread.currentThread().getName(), mutationsToSend.size());
      addMutations(mutationsToSend);
    } catch (Exception e) {
      updateUnknownErrors("Error processing mutation set", e);
    }
  }));
}
/**
 * Builds the writer's internal state: lookup maps, the send pool sized by the caller, and a
 * single-threaded binning pool.
 *
 * <p>The binning pool uses a {@link SynchronousQueue} (no buffering) together with
 * {@code CallerRunsPolicy}, so when the binner is busy the submitting thread bins the
 * mutations itself instead of queueing unbounded work.
 *
 * @param numSendThreads number of threads used to send binned mutations to tablet servers
 */
public MutationWriter(int numSendThreads) {
  locators = new HashMap<>();
  serversMutations = new HashMap<>();
  queued = new HashSet<>();
  sendThreadPool = new SimpleThreadPool(numSendThreads, this.getClass().getName());
  binningThreadPool = new SimpleThreadPool(1, "BinMutations", new SynchronousQueue<>());
  binningThreadPool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
 * Creates a batch reader that scans {@code tableId} with {@code numQueryThreads} parallel
 * query threads.
 *
 * @param context client connection context; must not be null
 * @param tableId table to scan; must not be null
 * @param authorizations scan authorizations; must not be null
 * @param numQueryThreads size of the query thread pool
 */
public TabletServerBatchReader(ClientContext context, Table.ID tableId, Authorizations authorizations, int numQueryThreads) {
  checkArgument(context != null, "context is null");
  checkArgument(tableId != null, "tableId is null");
  checkArgument(authorizations != null, "authorizations is null");
  this.context = context;
  this.tableId = tableId;
  this.authorizations = authorizations;
  this.numThreads = numQueryThreads;
  queryThreadPool = new SimpleThreadPool(numQueryThreads, "batch scanner " + batchReaderInstance + "-");
  ranges = null;
  // Throwable created here snapshots the construction stack trace — presumably used later to
  // report where an unclosed reader was created (TODO confirm against close()/finalize logic).
  ex = new Throwable();
}
// Fragment (truncated — braces unbalanced, continues past this view): test/driver code that
// builds a pool of NUM_THREADS persistent workers (core threads never time out, all prestarted).
// NOTE(review): the submitted Runnable calls threads.shutdown() and awaitTermination(3 MINUTES)
// on its OWN pool from inside a pool thread; the pool cannot terminate while that task is still
// running, so the await would always run out the full timeout — verify intent in the full source.
// NOTE(review): awaitTermination throws InterruptedException, which Runnable.run() cannot
// propagate; the truncated portion presumably wraps it in try/catch.
SimpleThreadPool threads = new SimpleThreadPool(NUM_THREADS, "ClientThreads"); threads.allowCoreThreadTimeOut(false); threads.prestartAllCoreThreads(); threads.execute(new Runnable() { @Override public void run() { threads.shutdown(); threads.awaitTermination(3, TimeUnit.MINUTES); bw.close(); Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
// Fragment (truncated — the for-loops are never closed in this view): test code that submits
// pairs of tasks per batch to a pool sized 2x the batch, then calls shutdownNow() and asserts
// that nothing was left queued and the task iterator is exhausted.
// NOTE(review): as shown, shutdownNow() sits inside the innermost loop, which would kill the
// pool after the first pair of submissions — this is almost certainly an artifact of truncation;
// confirm placement against the complete source. finalName is captured but unused in this view.
SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests"); for (String tableName : tableNames) { final String finalName = tableName; for (int batch = 0; batch < batches; batch++) { for (int i = 0; i < batchSize; i++) { Future<?> f = es.submit(itr.next()); results.add(f); f = es.submit(itr.next()); results.add(f); List<Runnable> queued = es.shutdownNow(); Assert.assertTrue("Had more tasks to run", queued.isEmpty()); Assert.assertFalse("Had more tasks that needed to be submitted", itr.hasNext());
// Fragment (truncated — the anonymous Callable's body and the surrounding code are cut off):
// submits a single balance-wait task on a one-thread pool so the caller can bound the wait
// via the returned Future<Boolean>.
SimpleThreadPool pool = new SimpleThreadPool(1, "waitForBalance"); Future<Boolean> wait = pool.submit(new Callable<Boolean>() { @Override public Boolean call() throws Exception {
public synchronized void close() throws MutationsRejectedException { if (closed) return; Span span = Trace.start("close"); try { closed = true; startProcessing(); waitRTE(() -> totalMemUsed > 0 && !somethingFailed); logStats(); checkForFailures(); } finally { // make a best effort to release these resources writer.binningThreadPool.shutdownNow(); writer.sendThreadPool.shutdownNow(); jtimer.cancel(); span.stop(); } }
/**
 * Creates a write-ahead-log sorter.
 *
 * @param context client context used for subsequent operations
 * @param fs volume manager for reading/writing log files
 * @param conf configuration supplying recovery concurrency and WAL block size
 */
public LogSorter(ClientContext context, VolumeManager fs, AccumuloConfiguration conf) {
  this.context = context;
  this.fs = fs;
  this.conf = conf;
  this.walBlockSize = DfsLogger.getWalBlockSize(conf);
  // Sort-thread pool is bounded by the configured maximum number of concurrent recoveries.
  final int recoveryThreads = conf.getCount(Property.TSERV_RECOVERY_MAX_CONCURRENT);
  this.threadPool = new SimpleThreadPool(recoveryThreads, this.getClass().getName());
}
/**
 * Initializes the writer's maps, the multi-threaded send pool, and a single-threaded binning
 * pool.
 *
 * <p>The binning pool pairs a {@link SynchronousQueue} (direct handoff, no buffering) with
 * {@code CallerRunsPolicy}: if the binner is busy, the submitting thread performs the binning
 * itself rather than queueing unbounded work.
 *
 * @param numSendThreads number of threads that send binned mutations to servers
 */
public MutationWriter(int numSendThreads) {
  queued = new HashSet<>();
  serversMutations = new HashMap<>();
  locators = new HashMap<>();
  sendThreadPool = new SimpleThreadPool(numSendThreads, this.getClass().getName());
  binningThreadPool = new SimpleThreadPool(1, "BinMutations", new SynchronousQueue<Runnable>());
  binningThreadPool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
}
public synchronized void close() throws MutationsRejectedException { if (closed) return; Span span = Trace.start("close"); try { closed = true; startProcessing(); waitRTE(new WaitCondition() { @Override public boolean shouldWait() { return totalMemUsed > 0 && !somethingFailed; } }); logStats(); checkForFailures(); } finally { // make a best effort to release these resources writer.binningThreadPool.shutdownNow(); writer.sendThreadPool.shutdownNow(); jtimer.cancel(); span.stop(); } }
// Fragment (truncated — the lambda's try block has no visible catch/finally, and the task body
// is cut off): submits bulk-file-move work to a pool, then shuts down and busy-waits (1s polls)
// for all moves to complete. The List<Future<Exception>> pattern suggests each task returns any
// Exception it caught rather than throwing it — TODO confirm in the full source.
// NOTE(review): `String sa[]` uses C-style array declaration; Java convention is `String[] sa`.
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move"); List<Future<Exception>> results = new ArrayList<>(); results.add(workers.submit(() -> { try { String sa[] = fileStatus.getPath().getName().split("\\."); })); workers.shutdown(); while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}
// Fragment (mid-method — begins with a bare `return;` from an unseen guard and the Runnable is
// cut off): lazily creates a single-threaded pool that pre-creates the next write-ahead log in
// the background, wrapping the task in LoggingRunnable so failures are logged rather than lost.
return; nextLogMaker = new SimpleThreadPool(1, "WALog creator"); nextLogMaker.submit(new LoggingRunnable(log, new Runnable() { @Override
/**
 * Creates a pool with a fixed bound of {@code max} threads, the caller-supplied work queue,
 * and threads named after {@code name}.
 *
 * <p>Core and maximum pool sizes are both {@code max}. Because
 * {@code allowCoreThreadTimeOut(true)} is set, even core threads expire after the 4-second
 * keep-alive, so an idle pool shrinks to zero threads.
 *
 * @param max maximum (and core) number of worker threads
 * @param name prefix handed to {@link NamingThreadFactory} for naming pool threads
 * @param queue work queue; callers choose the queueing/handoff behavior (e.g. a bounded or
 *        synchronous queue instead of an unbounded one)
 */
public SimpleThreadPool(int max, final String name, BlockingQueue<Runnable> queue) { super(max, max, 4L, TimeUnit.SECONDS, queue, new NamingThreadFactory(name)); allowCoreThreadTimeOut(true); }
/**
 * Hands a set of mutations to the binning thread pool for asynchronous binning by tablet
 * server. A {@code null} set is a no-op. Exceptions raised while binning are recorded via
 * {@link #updateUnknownErrors} rather than propagated, since the work runs on a pool thread.
 *
 * @param mutationsToSend mutations to bin; may be {@code null}
 * @throws InterruptedException retained in the signature for caller compatibility
 */
void queueMutations(final MutationSet mutationsToSend) throws InterruptedException {
  if (null == mutationsToSend)
    return;
  // The early return above guarantees non-null and the captured variable is final, so the
  // original's second null check inside run() was redundant and has been removed.
  binningThreadPool.execute(Trace.wrap(new Runnable() {
    @Override
    public void run() {
      try {
        log.trace("{} - binning {} mutations", Thread.currentThread().getName(), mutationsToSend.size());
        addMutations(mutationsToSend);
      } catch (Exception e) {
        updateUnknownErrors("Error processing mutation set", e);
      }
    }
  }));
}
// Fragment (truncated — braces unbalanced): takes the table offline, then uses a 50-thread
// "directory maker" pool to mkdir new tablet directories while rewriting metadata entries.
// After submission the pool is shut down and polled (5s awaitTermination) until terminated,
// restoring the interrupt flag if interrupted.
// NOTE(review): as shown, pool.shutdown() and the await loop appear INSIDE the submitted
// lambda's try block — almost certainly an artifact of truncation; in the full source they
// presumably follow the submission loop. Confirm before relying on this excerpt.
log.info("{} offline", tableName); SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker"); log.info("Rewriting entries for {}", tableName); Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY); pool.submit(() -> { try { vm.mkdirs(new Path(newLocation)); pool.shutdown(); while (!pool.isTerminated()) { log.trace("Waiting for mkdir() calls to finish"); try { pool.awaitTermination(5, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt();
/**
 * Lazily creates the shared bulk-import thread pool, sized by
 * {@code MASTER_BULK_THREADPOOL_SIZE}. Synchronized so concurrent callers observe a single
 * instance; once created, the same pool is returned for the life of the process.
 *
 * @param master source of the configuration used to size the pool
 * @return the shared, trace-wrapped bulk import executor
 */
private static synchronized ExecutorService getThreadPool(Master master) {
  if (threadPool != null) {
    return threadPool;
  }
  int poolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
  ThreadPoolExecutor executor = new SimpleThreadPool(poolSize, "bulk import");
  // Let idle core threads expire so the pool consumes no threads between bulk imports.
  executor.allowCoreThreadTimeOut(true);
  threadPool = new TraceExecutorService(executor);
  return threadPool;
}
/**
 * Creates a pool with a fixed bound of {@code max} threads, the caller-supplied work queue,
 * and threads named after {@code name}.
 *
 * <p>Core and maximum pool sizes are both {@code max}. Because
 * {@code allowCoreThreadTimeOut(true)} is set, even core threads expire after the 4-second
 * keep-alive, so an idle pool shrinks to zero threads.
 *
 * @param max maximum (and core) number of worker threads
 * @param name prefix handed to {@link NamingThreadFactory} for naming pool threads
 * @param queue work queue; callers choose the queueing/handoff behavior
 */
public SimpleThreadPool(int max, final String name, BlockingQueue<Runnable> queue) {
  // Was `4l`: the lowercase long suffix is easily misread as the digit 1; use uppercase L.
  super(max, max, 4L, TimeUnit.SECONDS, queue, new NamingThreadFactory(name));
  allowCoreThreadTimeOut(true);
}
// Fragment (truncated — the anonymous Callable's body is cut off): submits bulk-file-move tasks
// that report failures by RETURNING an Exception (Future<Exception>) instead of throwing, then
// shuts the pool down and busy-waits (1s polls) until all moves complete.
// NOTE(review): as shown, workers.shutdown() and the await loop sit inside call() — an artifact
// of truncation; in the full source they presumably follow the submission. Confirm before
// relying on this excerpt.
SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move"); List<Future<Exception>> results = new ArrayList<>(); results.add(workers.submit(new Callable<Exception>() { @Override public Exception call() throws Exception { workers.shutdown(); while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}