@Override public void destroy() { if (this.forkJoinPool != null) { // Ignored for the common pool. this.forkJoinPool.shutdown(); // Wait for all tasks to terminate - works for the common pool as well. if (this.awaitTerminationSeconds > 0) { try { this.forkJoinPool.awaitTermination(this.awaitTerminationSeconds, TimeUnit.SECONDS); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } } } }
@Override public void destroy() { if (this.forkJoinPool != null) { // Ignored for the common pool. this.forkJoinPool.shutdown(); // Wait for all tasks to terminate - works for the common pool as well. if (this.awaitTerminationSeconds > 0) { try { this.forkJoinPool.awaitTermination(this.awaitTerminationSeconds, TimeUnit.SECONDS); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } } } }
@Override
public void close() {
    // Initiate an orderly shutdown of every executor that was actually
    // created; any of these may still be null if close() runs before the
    // instance finished initializing.
    if (ctagsPool != null) {
        ctagsPool.shutdown();
    }
    if (forkJoinPool != null) {
        forkJoinPool.shutdown();
    }
    if (fixedExecutor != null) {
        fixedExecutor.shutdown();
    }
}
// Shuts down this component: parent resources first, then the local
// worker pool and the timer service. Note: shutdown() only initiates an
// orderly stop; it does not wait for tasks to finish.
public void shutdown() {
    super.shutdown();
    pool.shutdown();
    timerService.shutdown();
}

// NOTE(review): definition truncated in this view — body continues beyond it.
private static String[] processArgs(String[] args,int offset) {
throw new IllegalStateException(e.getCause()); } finally { pool.shutdown();
throw new IllegalStateException(e.getCause()); } finally { pool.shutdown();
private void runJobGraph(int nthreads) { Preconditions.checkState(rootJob != null, "job graph not built"); ForkJoinPool pool = new ForkJoinPool(nthreads); try { pool.invoke(rootJob); } catch (Throwable err) { logger.error("experiment failed", err); if (!continueAfterError) { Throwables.throwIfUnchecked(err); // will only happen if an undeclared checked exception slips through throw new UncheckedExecutionException(err); } } finally { pool.shutdown(); } }
forkJoinPool.shutdown();
forkJoinPool.shutdown();
@Test public void testCreatingMultipleTablesAtOnce() throws InterruptedException { int threadCount = 16; CyclicBarrier barrier = new CyclicBarrier(threadCount); ForkJoinPool threadPool = new ForkJoinPool(threadCount); threadPool.submit(() -> IntStream.range(0, threadCount).parallel().forEach(i -> { try { barrier.await(); slowTimeoutKvs.createTable(GOOD_TABLE, AtlasDbConstants.GENERIC_TABLE_METADATA); } catch (BrokenBarrierException | InterruptedException e) { // Do nothing } })); threadPool.shutdown(); Preconditions.checkState(threadPool.awaitTermination(90, TimeUnit.SECONDS), "Not all table creation threads completed within the time limit"); slowTimeoutKvs.dropTable(GOOD_TABLE); }
/**
 * Update the count of each directory with quota in the namespace.
 * A directory's count is defined as the total number inodes in the tree
 * rooted at the directory.
 *
 * This is an update of existing state of the filesystem and does not
 * throw QuotaExceededException.
 */
void updateCountForQuota(int initThreads) {
  writeLock();
  try {
    // Clamp to at least one worker thread.
    int threads = (initThreads < 1) ? 1 : initThreads;
    LOG.info("Initializing quota with " + threads + " thread(s)");
    long start = Time.monotonicNow();
    QuotaCounts counts = new QuotaCounts.Builder().build();
    ForkJoinPool p = new ForkJoinPool(threads);
    try {
      RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
          rootDir.getStoragePolicyID(), rootDir, counts);
      p.execute(task);
      task.join();
    } finally {
      // Shut the pool down even if the task throws, so its worker
      // threads are not leaked on failure.
      p.shutdown();
    }
    LOG.info("Quota initialization completed in "
        + (Time.monotonicNow() - start) + " milliseconds\n" + counts);
  } finally {
    writeUnlock();
  }
}
private void cleanupForkJoinPool() { if (forkJoinPool == SHARED_FORK_JOIN_POOL) return; // no need to clean up else forkJoinPool.shutdown(); // private pool needs cleaning }
private void cleanupForkJoinPool() { if (forkJoinPool == SHARED_FORK_JOIN_POOL) return; // no need to clean up else forkJoinPool.shutdown(); // private pool needs cleaning }
public void shutdown() {
    // Safe to call even if the pool was never created.
    if (pool == null) {
        return;
    }
    pool.shutdown();
}
/**
 * Releases the HTTP client and its thread pool. The pool is shut down in a
 * finally block so it is released even if closing the client throws.
 */
@Override
public void close() {
    try {
        asyncHttpClient.close();
    } finally {
        // Previously skipped when close() threw, leaking the pool's threads.
        threadPool.shutdown();
    }
}
/**
 * Releases the HTTP client and its thread pool. Shutting the pool down in a
 * finally block guarantees its threads are released even when the client's
 * close() throws.
 */
@Override
public void close() {
    try {
        asyncHttpClient.close();
    } finally {
        threadPool.shutdown();
    }
}
/**
 * Initiates an orderly shutdown of the pool and waits up to
 * {@code Kompics.SHUTDOWN_TIMEOUT} ms for tasks to finish, logging a
 * warning if the deadline passes or the wait is interrupted.
 */
@Override
public void shutdown() {
    pool.shutdown();
    try {
        if (!pool.awaitTermination(Kompics.SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS)) {
            Kompics.logger.warn("Failed orderly Kompics shutdown");
        }
    } catch (InterruptedException ex) {
        // Restore the interrupt flag — swallowing it would hide the
        // interruption from callers further up the stack.
        Thread.currentThread().interrupt();
        Kompics.logger.warn("Failed orderly Kompics shutdown", ex);
    }
}
/**
 * Runs t-SNE with per-run gradient executors sized to the available
 * processors. The pools are shut down in a finally block so their threads
 * are not leaked when the parent run() throws.
 */
@Override
double[][] run(TSneConfiguration config) {
    gradientPool = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
    gradientCalculationPool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    try {
        return super.run(config);
    } finally {
        // Previously skipped on exception, leaking both executors.
        gradientPool.shutdown();
        gradientCalculationPool.shutdown();
    }
}
/**
 * Shuts down the mapper pool and blocks until all mappers finish, failing
 * if they do not terminate within the task's configured timeout.
 */
private void joinProcessors() {
    mapperPool.shutdown();
    try {
        if (!mapperPool.awaitTermination(task.getTaskFinishTimeout(), TimeUnit.SECONDS)) {
            throw new RuntimeException("Mapper pool did not terminate after "
                    + task.getTaskFinishTimeout() + " seconds.");
        }
    } catch (InterruptedException ex) {
        log.error("Interrupted while waiting for mapper pool termination.");
        // Restore interrupt status before rethrowing so the interruption
        // remains visible to the caller.
        Thread.currentThread().interrupt();
        Throwables.propagate(ex);
    }
}
/**
 * Computes per-column statistics in parallel: a mean pass followed by a
 * variance pass over the same buffer.
 *
 * NOTE(review): this returns {@code means} — presumably VarianceAction
 * overwrites the array in place with variances; confirm against that class.
 */
@Override
public float[] compute(boolean biasCorrected) {
    float[] means = new float[numOfCols];
    ForkJoinPool pool = new ForkJoinPool(numOfThreads);
    try {
        pool.invoke(new MeanAction(data, means, 0, numOfCols - 1));
        pool.invoke(new VarianceAction(data, means, biasCorrected, 0, numOfCols - 1));
    } finally {
        // Release pool threads even if either action throws.
        pool.shutdown();
    }
    return means;
}