/**
 * Creates a serializer backed by a scaling thread pool sized to the common
 * ForkJoinPool's parallelism level.
 *
 * <p>NOTE(review): {@code ExecutorUtils.newScalingThreadPool} is project-local —
 * presumably it builds a pool that grows and shrinks with demand; verify its
 * shutdown semantics against this class's termination path.
 */
public WaitFreeMultiExecutionSerializer() { this(ExecutorUtils.newScalingThreadPool(ForkJoinPool.getCommonPoolParallelism())); }
/**
 * Computes initial batch value for bulk tasks. The returned value
 * is approximately exp2 of the number of times (minus one) to
 * split task by two before executing leaf action. This value is
 * faster to compute and more convenient to use as a guide to
 * splitting than is the depth, since it is used while dividing by
 * two anyway.
 *
 * @param b caller-supplied element threshold; {@code Long.MAX_VALUE}
 *          disables splitting entirely
 * @return 0 when splitting is pointless, otherwise a batch guide capped
 *         at four times the common-pool parallelism
 */
final int batchFor(long b) {
    long n;
    // No split when the threshold is "never", the structure holds at most
    // one element, or the whole element count fits under the threshold.
    // Note: the middle clause assigns n = sumCount() as a side effect.
    if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
        return 0;
    int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
    // b <= 0L also guards the division below against non-positive b.
    return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
}
/**
 * Returns the parallelism to use for task splitting: the hosting pool's
 * level when the current thread runs inside a ForkJoinPool, otherwise
 * the common pool's configured level.
 *
 * <p>{@code ForkJoinTask.getPool()} returns {@code null} exactly when
 * {@code ForkJoinTask.inForkJoinPool()} is false, so the null check below
 * is equivalent to the original predicate.
 */
private static int availableParallelism() {
    ForkJoinPool hostPool = ForkJoinTask.getPool();
    return (hostPool == null)
            ? ForkJoinPool.getCommonPoolParallelism()
            : hostPool.getParallelism();
}
/**
 * Returns the parallelism level appropriate for the current thread:
 * the hosting pool's parallelism when called from a ForkJoinPool worker
 * thread, otherwise the common pool's configured parallelism.
 */
private static int availableParallelism() { return ForkJoinTask.inForkJoinPool() ? ForkJoinTask.getPool().getParallelism() : ForkJoinPool.getCommonPoolParallelism(); }
/**
 * Entry point: prints the common-pool parallelism, then runs the forEach
 * smoke test. The search and reduce tests are left disabled below —
 * NOTE(review): confirm whether they should be re-enabled or deleted.
 */
public static void main(String[] args) {
    System.out.println("Parallelism: " + ForkJoinPool.getCommonPoolParallelism());
    testForEach();
    // testSearch();
    // testReduce();
}
/**
 * Chooses the minimum partition size for splitting {@code n} elements:
 * roughly {@code n / (8 * parallelism)} to over-partition for load
 * balancing, never below {@code MIN_ARRAY_SORT_GRAN}, and the whole
 * range when only one worker is available.
 *
 * @param n the number of elements to be partitioned
 * @return the smallest sub-range size worth forking
 */
private static final int getSplitThreshold(int n) {
    final int parallelism = ForkJoinPool.getCommonPoolParallelism();
    int granularity;
    if (parallelism > 1) {
        // parallelism << 3 == 8 * parallelism: about eight leaves per worker.
        granularity = 1 + n / (parallelism << 3);
    } else {
        granularity = n;
    }
    return Math.max(granularity, MIN_ARRAY_SORT_GRAN);
}
/**
 * Default target factor of leaf tasks for parallel decomposition.
 * To allow load balancing, we over-partition, currently to approximately
 * four tasks per processor, which enables others to help out
 * if leaf tasks are uneven or some processors are otherwise busy.
 */
static final int LEAF_TARGET = ForkJoinPool.getCommonPoolParallelism() << 2; // parallelism * 4
/**
 * Sizes this component's worker pool to match the JDK common pool's
 * parallelism level.
 *
 * @return the number of threads to use
 */
@Override
protected int getThreadPoolSize() {
    final int commonParallelism = ForkJoinPool.getCommonPoolParallelism();
    return commonParallelism;
}
/**
 * Uses the common ForkJoinPool's parallelism as this component's
 * thread-pool size.
 *
 * @return the number of worker threads
 */
@Override
protected int getThreadPoolSize() {
    // Delegate sizing to the JDK's common-pool configuration.
    return ForkJoinPool.getCommonPoolParallelism();
}
/**
 * Reports the thread-pool size for this component, taken from the
 * common ForkJoinPool's parallelism level.
 *
 * @return the pool size to configure
 */
@Override
protected int getThreadPoolSize() {
    final int size = ForkJoinPool.getCommonPoolParallelism();
    return size;
}
/**
 * Matches this component's thread-pool size to the parallelism of the
 * JDK common ForkJoinPool.
 *
 * @return the number of threads to allocate
 */
@Override
protected int getThreadPoolSize() {
    // Same sizing policy as the common pool itself.
    return ForkJoinPool.getCommonPoolParallelism();
}
/**
 * Supplies the worker-pool size for this component, mirroring the
 * common ForkJoinPool's parallelism.
 *
 * @return the configured thread count
 */
@Override
protected int getThreadPoolSize() {
    final int threads = ForkJoinPool.getCommonPoolParallelism();
    return threads;
}
/**
 * Reports the JDK common pool's parallelism level.
 *
 * @return the value of
 *         {@link java.util.concurrent.ForkJoinPool#getCommonPoolParallelism()}
 */
@Override
public int getCommonPoolParallelism() {
    final int level = java.util.concurrent.ForkJoinPool.getCommonPoolParallelism();
    return level;
}
/**
 * Builds a dedicated ForkJoinPool whose parallelism mirrors the common
 * pool's level. The caller owns the returned pool and must shut it down.
 *
 * @return a freshly constructed ForkJoinPool
 */
public static ForkJoinPool createFJPool() {
    final int parallelism = ForkJoinPool.getCommonPoolParallelism();
    return new ForkJoinPool(parallelism);
}
/**
 * Creates a new private ForkJoinPool sized to the common pool's
 * parallelism level. The caller is responsible for shutting it down.
 *
 * @return a new ForkJoinPool instance
 */
public static ForkJoinPool createFJPool() { return new ForkJoinPool(ForkJoinPool.getCommonPoolParallelism()); }
/**
 * Computes initial batch value for bulk tasks. The returned value
 * is approximately exp2 of the number of times (minus one) to
 * split task by two before executing leaf action. This value is
 * faster to compute and more convenient to use as a guide to
 * splitting than is the depth, since it is used while dividing by
 * two anyway.
 *
 * @param b caller-supplied element threshold; {@code Long.MAX_VALUE}
 *          disables splitting entirely
 * @return 0 when splitting is pointless, otherwise a batch guide capped
 *         at four times the common-pool parallelism
 */
final int batchFor(long b) {
    long n;
    // No split when the threshold is "never", the structure holds at most
    // one element, or the whole element count fits under the threshold.
    // Note: the middle clause assigns n = sumCount() as a side effect.
    if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
        return 0;
    int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
    // b <= 0L also guards the division below against non-positive b.
    return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
}
/**
 * Entry point: prints the common-pool parallelism, then runs the forEach,
 * search, and reduce smoke tests in that order.
 */
public static void main(String[] args) {
    System.out.println("Parallelism: " + ForkJoinPool.getCommonPoolParallelism());
    testForEach();
    testSearch();
    testReduce();
}
/**
 * Returns the effective parallelism for the calling thread: the hosting
 * ForkJoinPool's level when executing inside one, otherwise the common
 * pool's configured level.
 */
private static int availableParallelism() {
    final boolean insidePool = ForkJoinTask.inForkJoinPool();
    if (insidePool) {
        return ForkJoinTask.getPool().getParallelism();
    }
    return ForkJoinPool.getCommonPoolParallelism();
}
/**
 * Returns the parallelism level for the current context: the enclosing
 * pool's parallelism when running on a ForkJoinPool worker thread,
 * otherwise the common pool's configured parallelism.
 */
private static int availableParallelism() { return ForkJoinTask.inForkJoinPool() ? ForkJoinTask.getPool().getParallelism() : ForkJoinPool.getCommonPoolParallelism(); }
/**
 * TestNG data provider yielding random delay values in [0, 3000),
 * ten rows per common-pool worker, for parallel test execution.
 *
 * @return an iterator of one-element {@code Object[]} rows, each holding
 *         a boxed delay value
 */
@DataProvider(name = "delays", parallel = true)
public static Iterator<Object[]> delays() {
    // Fix: the original allocated (and seeded) a brand-new java.util.Random
    // for every generated element; ThreadLocalRandom.current() is the
    // idiomatic, allocation-free source for throwaway random ints.
    return IntStream.generate(() -> java.util.concurrent.ThreadLocalRandom.current().nextInt(3000))
            .limit(ForkJoinPool.getCommonPoolParallelism() * 10)
            .boxed()
            .map(i -> new Object[]{i})
            .collect(Collectors.toList())
            .iterator();
}