Refine search
/**
 * Asynchronously computes the partition hash for the given local partition
 * by offloading the work to the common {@link ForkJoinPool}.
 *
 * @param grpCtx Group context.
 * @param part Local partition.
 * @return Future producing the map of partition keys to hash records.
 */
private Future<Map<PartitionKey, PartitionHashRecord>> calculatePartitionHashAsync(
    final CacheGroupContext grpCtx,
    final GridDhtLocalPartition part
) {
    // Lambda replaces the verbose anonymous Callable; the submitted task is identical.
    return ForkJoinPool.commonPool().submit(() -> calculatePartitionHash(grpCtx, part));
}
/**
 * Creates a scaling executor backed by a {@link ForkJoinPool} whose workers are
 * named {@code OrbitThread-<poolIndex>} and whose uncaught exceptions are logged
 * at SEVERE level.
 *
 * @param maxThreads target parallelism of the pool
 * @return the executor service
 */
public static ExecutorService newScalingThreadPool(final int maxThreads) {
    final ForkJoinPool.ForkJoinWorkerThreadFactory namingFactory = pool -> {
        final ForkJoinWorkerThread t = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
        t.setName("OrbitThread-" + t.getPoolIndex());
        return t;
    };
    final Thread.UncaughtExceptionHandler exceptionLogger =
            (thread, e) -> logger.log(Level.SEVERE, "Uncaught exception", e);
    return new ForkJoinPool(maxThreads, namingFactory, exceptionLogger, false);
}
}
// Thin delegate: forwards the task unchanged to the wrapped executor.
@Override public void execute(Runnable r) { executor.execute(r); }
/**
 * Initializes the target {@link ForkJoinPool} once all bean properties are set:
 * either the shared common pool, or a dedicated pool built from the configured
 * parallelism, thread factory, exception handler and async mode.
 */
@Override
public void afterPropertiesSet() {
    if (this.commonPool) {
        this.forkJoinPool = ForkJoinPool.commonPool();
    }
    else {
        this.forkJoinPool = new ForkJoinPool(
                this.parallelism, this.threadFactory, this.uncaughtExceptionHandler, this.asyncMode);
    }
}
@Override public void destroy() { if (this.forkJoinPool != null) { // Ignored for the common pool. this.forkJoinPool.shutdown(); // Wait for all tasks to terminate - works for the common pool as well. if (this.awaitTerminationSeconds > 0) { try { this.forkJoinPool.awaitTermination(this.awaitTerminationSeconds, TimeUnit.SECONDS); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } } } }
// Supplies the JVM-wide ForkJoinPool common pool as this constant's executor.
// NOTE(review): fragment — the enclosing enum/anonymous-class declaration is not visible here.
@Override public Executor createExecutor() { return ForkJoinPool.commonPool(); } },
// NOTE(review): truncated lambda fragment (unbalanced parentheses) — races subscription
// cancel() against request(1) on the parallel scheduler, offloaded to the common pool;
// the enclosing call and closing delimiters are not visible in this view.
__ -> { }, () -> captureCompletion.set(true), s -> ForkJoinPool.commonPool().execute(() -> RaceTestUtils.race(s::cancel, () -> s.request(1), Schedulers.parallel())
// NOTE(review): truncated fragment with a defect — the catch block contains two consecutive
// throw statements; the second (wrapping e.getCause()) is unreachable dead code and makes
// this uncompilable as written. Looks like a bad merge: likely ExecutionException was meant
// to be caught separately and rethrown via its cause. Fix in the full source.
IntStream taskIndices = parallelism > 1 ? range.parallel() : range; if (parallelism > 1 && privatePool) { ForkJoinPool pool = new ForkJoinPool(parallelism); try { pool.submit(() -> taskIndices.forEach(task::accept)).get(); } catch (InterruptedException e) { throw new IllegalStateException(e); throw new IllegalStateException(e.getCause()); } finally { pool.shutdown();
private void runJobGraph(int nthreads) { Preconditions.checkState(rootJob != null, "job graph not built"); ForkJoinPool pool = new ForkJoinPool(nthreads); try { pool.invoke(rootJob); } catch (Throwable err) { logger.error("experiment failed", err); if (!continueAfterError) { Throwables.throwIfUnchecked(err); // will only happen if an undeclared checked exception slips through throw new UncheckedExecutionException(err); } } finally { pool.shutdown(); } }
// NOTE(review): truncated test fragment — kicks off a RecursiveAction on the common pool
// exercising a two-permit Semaphore; the action's compute() body is not visible here.
@Test public void testAcquireSem() { Semaphore sem = new Semaphore(2); ForkJoinPool.commonPool().invoke(new RecursiveAction() { AtomicInteger running = new AtomicInteger();
// Run the parallel-stream computation inside a dedicated 2-thread ForkJoinPool so the
// stream's workers come from this pool rather than the shared common pool.
ForkJoinPool forkJoinPool = new ForkJoinPool(2);
try {
    forkJoinPool.submit(() ->
        //parallel task here, for example
        IntStream.range(1, 1_000_000).parallel().filter(PrimesPrint::isPrime).collect(toList())
    ).get();
} finally {
    // FIX: the pool was previously never shut down, leaking its two worker threads.
    forkJoinPool.shutdown();
}
private int getNumberOfBatches(final ReaderSpliterator spliterator) throws ExecutionException, InterruptedException { final AtomicInteger numSplits = new AtomicInteger(0); //we want to wrap the spliterator and count the (valid) splits Spliterator<String> delegatingSpliterator = spy(spliterator); doAnswer(invocationOnMock -> { Spliterator<String> ret = spliterator.trySplit(); if(ret != null) { numSplits.incrementAndGet(); } return ret; }).when(delegatingSpliterator).trySplit(); Stream<String> stream = StreamSupport.stream(delegatingSpliterator, true); //now run it in a parallel pool and do some calculation that doesn't really matter. ForkJoinPool forkJoinPool = ForkJoinPool.commonPool(); forkJoinPool.submit(() -> { Map<String, Integer> threads = stream.parallel().map(s -> Thread.currentThread().getName()) .collect(Collectors.toMap(s -> s, s -> 1, Integer::sum)); Assert.assertTrue(threads.size() > 0); } ).get(); return numSplits.get(); }
/**
 * Writes each (path -> record list) entry in parallel on a dedicated pool, skipping
 * empty lists, and returns the written paths sorted by file name.
 *
 * @param hadoopConfig Hadoop configuration passed through to the writer
 * @param toWrite map of output path to the records destined for that path
 * @param parallelism number of worker threads for the private ForkJoinPool
 * @return sorted list of paths that were actually written
 * @throws IOException if any write fails or the parallel task is interrupted
 */
protected List<Path> writeParallel(Configuration hadoopConfig, Map<Path, List<byte[]>> toWrite,
        int parallelism) throws IOException {
    List<Path> outFiles = Collections.synchronizedList(new ArrayList<>());
    ForkJoinPool tp = new ForkJoinPool(parallelism);
    try {
        tp.submit(() -> {
            toWrite.entrySet().parallelStream().forEach(e -> {
                Path path = e.getKey();
                List<byte[]> data = e.getValue();
                if (data.size() > 0) {
                    try {
                        write(getResultsWriter(), hadoopConfig, data, path);
                    } catch (IOException ioe) {
                        // Checked exceptions can't cross the stream boundary; unwrap in the catch below.
                        throw new RuntimeException(
                                String.format("Failed to write results to path '%s'", path.toString()), ioe);
                    }
                    outFiles.add(path);
                }
            });
        }).get();
    } catch (InterruptedException | ExecutionException e) {
        throw new IOException("Error finalizing results.", e);
    } catch (RuntimeException e) {
        throw new IOException(e.getMessage(), e.getCause());
    } finally {
        // FIX: the private pool was never shut down, leaking 'parallelism' worker threads per call.
        tp.shutdown();
    }
    outFiles.sort((o1, o2) -> o1.getName().compareTo(o2.getName()));
    return outFiles;
}
// NOTE(review): truncated fragment. Hazards visible even in this slice: the submitted
// Runnable calls builderPool.shutdownNow() from inside the very pool it runs on, and
// builderList is mutated while tasks execute — confirm both are intended in the full source.
ForkJoinPool builderPool = new ForkJoinPool(taskThreadCount, factory, null, true); CuboidResultWatcher resultWatcher = new CuboidResultWatcher(builderList, output); try { BaseCuboidTask<T> task = new BaseCuboidTask<>(inputController, 1, resultWatcher); builderPool.execute(task); do { builderList.add(task.getInternalBuilder()); builderPool.submit(new Runnable() { @Override public void run() { closeGirdTables(builderList); sw.stop(); builderPool.shutdownNow(); logger.info("Dogged Cube Build2 end, totally took " + sw.elapsedMillis() + " ms"); logger.info("Dogged Cube Build2 return");
// NOTE(review): truncated fragment with a defect — 'rootTask' is referenced
// (rootTask.join()) inside the anonymous Runnable used to initialize it. A local
// variable cannot be self-referenced in its own initializer, so this cannot compile
// as shown; it is presumably a field in the full source — verify there.
ForkJoinPool builderPool = new ForkJoinPool(taskThreadCount, factory, null, true); ForkJoinTask rootTask = builderPool.submit(new Runnable() { @Override public void run() { rootTask.join();
/**
 * Builds the ForkJoinPool used for parallel test execution. Parallelism comes from
 * the "maxParallelTestThreads" system property when parseable, otherwise from the
 * number of available processors; either way it is clamped to at least 2. Worker
 * threads get a "JUnit-" name prefix, and the factory caps growth by returning
 * null once the pool size reaches the configured parallelism.
 */
static ForkJoinPool setUpForkJoinPool() {
    int parallelism;
    try {
        parallelism = Math.max(2, Integer.parseInt(System.getProperty("maxParallelTestThreads")));
    } catch (Exception ignored) {
        // Property missing or malformed: fall back to the hardware's processor count.
        parallelism = Math.max(2, Runtime.getRuntime().availableProcessors());
    }
    ForkJoinPool.ForkJoinWorkerThreadFactory namingFactory = pool -> {
        // Refuse to create threads beyond the configured parallelism.
        if (pool.getPoolSize() >= pool.getParallelism()) {
            return null;
        }
        ForkJoinWorkerThread worker = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
        worker.setName("JUnit-" + worker.getName());
        return worker;
    };
    return new ForkJoinPool(parallelism, namingFactory, null, false);
}
// NOTE(review): for the CyclicBarrier to trip, all 'threadCount' elements must reach
// barrier.await() concurrently, but a parallel IntStream inside a ForkJoinPool(threadCount)
// is not guaranteed to dedicate a distinct thread per element — if fewer threads
// participate, await() blocks and the test only fails via the 90s awaitTermination guard.
// Also, InterruptedException is swallowed without restoring the interrupt flag — the
// comment suggests this is deliberate, but confirm.
@Test public void testCreatingMultipleTablesAtOnce() throws InterruptedException { int threadCount = 16; CyclicBarrier barrier = new CyclicBarrier(threadCount); ForkJoinPool threadPool = new ForkJoinPool(threadCount); threadPool.submit(() -> IntStream.range(0, threadCount).parallel().forEach(i -> { try { barrier.await(); slowTimeoutKvs.createTable(GOOD_TABLE, AtlasDbConstants.GENERIC_TABLE_METADATA); } catch (BrokenBarrierException | InterruptedException e) { // Do nothing } })); threadPool.shutdown(); Preconditions.checkState(threadPool.awaitTermination(90, TimeUnit.SECONDS), "Not all table creation threads completed within the time limit"); slowTimeoutKvs.dropTable(GOOD_TABLE); }
// NOTE(review): verifies LazyFuture resolves exactly once under contention — all three
// parallel callers must observe the same value (1). Two review flags: the inline
// ForkJoinPool is never shut down (its worker threads outlive the test), and the
// countdown loop assumes the resolvers block on latch.await() while it drains — the
// 5s timeout guards against a hang, but confirm the ordering assumption is intended.
@Test(timeout = 5_000) public void testThreadSafety() throws Exception { final int numOfThreads = 3; CountDownLatch latch = new CountDownLatch(numOfThreads); AtomicInteger counter = new AtomicInteger(); Future<Integer> lazyFuture = new LazyFuture<Integer>() { @Override @SneakyThrows(InterruptedException.class) protected Integer resolve() { latch.await(); return counter.incrementAndGet(); } }; Future<List<Integer>> task = new ForkJoinPool(numOfThreads).submit(() -> { return IntStream.rangeClosed(1, numOfThreads).parallel().mapToObj(i -> Futures.getUnchecked(lazyFuture)).collect(toList()); }); while (latch.getCount() > 0) { latch.countDown(); } assertEquals("All threads receives the same result", Collections.nCopies(numOfThreads, 1), task.get()); }
/**
 * Update the count of each directory with quota in the namespace.
 * A directory's count is defined as the total number inodes in the tree
 * rooted at the directory.
 *
 * This is an update of existing state of the filesystem and does not
 * throw QuotaExceededException.
 *
 * @param initThreads requested parallelism; values below 1 are clamped to 1
 */
void updateCountForQuota(int initThreads) {
    writeLock();
    try {
        int threads = (initThreads < 1) ? 1 : initThreads;
        LOG.info("Initializing quota with " + threads + " thread(s)");
        long start = Time.monotonicNow();
        QuotaCounts counts = new QuotaCounts.Builder().build();
        ForkJoinPool p = new ForkJoinPool(threads);
        RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
                rootDir.getStoragePolicyID(), rootDir, counts);
        try {
            p.execute(task);
            task.join();
        } finally {
            // FIX: shut the pool down even when join() rethrows a task failure;
            // previously an exception here skipped shutdown and leaked worker threads.
            p.shutdown();
        }
        LOG.info("Quota initialization completed in "
                + (Time.monotonicNow() - start) + " milliseconds\n" + counts);
    } finally {
        writeUnlock();
    }
}