/**
 * {@link Scheduler} that dynamically creates ExecutorService-based Workers and caches
 * the thread pools, reusing them once the Workers have been shut down.
 * <p>
 * The maximum number of created thread pools is unbounded.
 * <p>
 * This scheduler is not restartable.
 *
 * @param name Thread prefix
 * @param ttlSeconds Time-to-live for an idle {@link reactor.core.scheduler.Scheduler.Worker}
 *
 * @return a new {@link Scheduler} that dynamically creates single-threaded
 * ExecutorService-based workers and caches the thread pools for reuse once
 * the workers have been shut down
 */
public static Scheduler newElastic(String name, int ttlSeconds) {
    // Delegates with daemon = false: the created threads are non-daemon, so an
    // explicit dispose() is required for the VM to exit (see the daemon overload).
    return newElastic(name, ttlSeconds, false);
}
/**
 * {@link Scheduler} that dynamically creates ExecutorService-based Workers and caches
 * the thread pools, reusing them once the Workers have been shut down.
 * <p>
 * The maximum number of created thread pools is unbounded.
 * <p>
 * The default time-to-live for unused thread pools is 60 seconds, use the appropriate
 * factory to set a different value.
 * <p>
 * This scheduler is not restartable.
 *
 * @param name Thread prefix
 *
 * @return a new {@link Scheduler} that dynamically creates single-threaded
 * ExecutorService-based workers and caches the thread pools for reuse once
 * the workers have been shut down
 */
public static Scheduler newElastic(String name) {
    // Delegates with the default idle-worker TTL (60 seconds, per the javadoc above).
    return newElastic(name, ElasticScheduler.DEFAULT_TTL_SECONDS);
}
@Override
protected Scheduler scheduler() {
    // Supply a fresh, dedicated elastic scheduler for this test class.
    final String threadPrefix = "ElasticSchedulerTest";
    return Schedulers.newElastic(threadPrefix);
}
/**
 * {@link Scheduler} that dynamically creates ExecutorService-based Workers and caches
 * the thread pools, reusing them once the Workers have been shut down.
 * <p>
 * The maximum number of created thread pools is unbounded.
 * <p>
 * This scheduler is not restartable.
 *
 * @param name Thread prefix
 * @param ttlSeconds Time-to-live for an idle {@link reactor.core.scheduler.Scheduler.Worker}
 * @param daemon false if the {@link Scheduler} requires an explicit {@link
 * Scheduler#dispose()} to exit the VM.
 *
 * @return a new {@link Scheduler} that dynamically creates single-threaded
 * ExecutorService-based workers and caches the thread pools for reuse once
 * the workers have been shut down
 */
public static Scheduler newElastic(String name, int ttlSeconds, boolean daemon) {
    // Delegates to the ThreadFactory-based overload; the factory applies the name
    // prefix, a shared counter for thread numbering, the daemon flag, and routes
    // uncaught exceptions to Schedulers' default handler.
    return newElastic(ttlSeconds,
            new ReactorThreadFactory(name, ElasticScheduler.COUNTER, daemon, false,
                    Schedulers::defaultUncaughtException));
}
@Test(expected = IllegalArgumentException.class)
public void negativeTime() throws Exception {
    // A negative worker TTL must be rejected by the elastic scheduler factory.
    final int invalidTtlSeconds = -1;
    Schedulers.newElastic("test", invalidTtlSeconds);
}
@BeforeClass
public static void loadEnv() {
    // Shared schedulers for the whole test class: a 4-thread parallel pool for
    // async work and an elastic pool for IO-style work. The two assignments are
    // independent of each other.
    asyncGroup = Schedulers.newParallel("parallel", 4);
    ioGroup = Schedulers.newElastic("work");
}
@Test(timeout = 5000)
public void elasticSchedulerThreadCheck() throws Exception {
    // A task scheduled on an elastic worker must not run on the caller's thread.
    Scheduler elastic = Schedulers.newElastic("work");
    try {
        Scheduler.Worker worker = elastic.createWorker();
        Thread testThread = Thread.currentThread();
        // Seeded with the test thread so the assertion fails if the task never ran.
        AtomicReference<Thread> observedThread = new AtomicReference<>(testThread);
        CountDownLatch taskDone = new CountDownLatch(1);

        worker.schedule(() -> {
            observedThread.set(Thread.currentThread());
            taskDone.countDown();
        });
        taskDone.await();

        assertThat(observedThread.get()).isNotEqualTo(testThread);
    }
    finally {
        elastic.dispose();
    }
}
@Test
public void elasticSchedulerDefaultBlockingOk() throws InterruptedException {
    // Blocking (Mono#block) from inside an elastic-scheduled task is allowed and
    // must not raise any error.
    Scheduler elastic = Schedulers.newElastic("elasticSchedulerDefaultNonBlocking");
    CountDownLatch taskDone = new CountDownLatch(1);
    AtomicReference<Throwable> caught = new AtomicReference<>();
    try {
        elastic.schedule(() -> {
            try {
                Mono.just("foo")
                    .hide()
                    .block();
            }
            catch (Throwable t) {
                caught.set(t);
            }
            finally {
                taskDone.countDown();
            }
        });
        taskDone.await();
    }
    finally {
        elastic.dispose();
    }

    assertThat(caught.get()).isNull();
}
@Test
public void decorateTwiceWithSameSchedulerInstance() {
    Scheduler scheduler = Schedulers.newElastic("TWICE", 1);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

    // Decorating the same scheduler/executor pair twice must register two distinct
    // metric names (suffixes -0 and -1) rather than overwrite the first one.
    Schedulers.decorateExecutorService(scheduler, executor);
    Schedulers.decorateExecutorService(scheduler, executor);

    assertThat(simpleMeterRegistry.getMeters()
                                  .stream()
                                  .map(m -> m.getId().getTag("name"))
                                  .distinct())
            .containsOnly(
                    "elastic(\"TWICE\")-0",
                    "elastic(\"TWICE\")-1"
            );
}
@Test public void testParallelism() throws Exception { Flux<Integer> flux = Flux.just(1, 2, 3); Set<String> threadNames = Collections.synchronizedSet(new TreeSet<>()); AtomicInteger count = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(3); flux // Uncomment line below for failure .cache(1) .parallel(3) .runOn(Schedulers.newElastic("TEST")) .subscribe(i -> { threadNames.add(Thread.currentThread() .getName()); count.incrementAndGet(); latch.countDown(); tryToSleep(1000); }); latch.await(); Assert.assertEquals("Multithreaded count", 3, count.get()); Assert.assertEquals("Multithreaded threads", 3, threadNames.size()); }
@Test(timeout = 10000) public void eviction() throws Exception { Scheduler s = Schedulers.newElastic("test-recycle", 2); ((ElasticScheduler)s).evictor.shutdownNow(); try{ Disposable d = s.schedule(() -> { try { Thread.sleep(10000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } }); d.dispose(); while(((ElasticScheduler)s).cache.peek() != null){ ((ElasticScheduler)s).eviction(); Thread.sleep(100); } } finally { s.dispose(); s.dispose();//noop } assertThat(((ElasticScheduler)s).cache).isEmpty(); assertThat(s.isDisposed()).isTrue(); }
@Test
public void smokeTestDelay() {
    // Repeat 20 times to catch flaky timing: a 100ms Mono.delay on a fresh
    // elastic scheduler must complete no earlier than 100ms and well under 200ms.
    for (int iteration = 0; iteration < 20; iteration++) {
        Scheduler scheduler = Schedulers.newElastic("test");
        AtomicLong subscribedAt = new AtomicLong();
        AtomicLong terminatedAt = new AtomicLong();
        try {
            StepVerifier.create(Mono
                    .delay(Duration.ofMillis(100), scheduler)
                    .doOnSubscribe(sub -> subscribedAt.set(System.nanoTime()))
                    .doOnTerminate(() -> terminatedAt.set(System.nanoTime()))
            )
                        .expectSubscription()
                        .expectNext(0L)
                        .verifyComplete();

            long startNanos = subscribedAt.longValue();
            long endNanos = terminatedAt.longValue();
            long delayNanos = endNanos - startNanos;
            long delayMillis = TimeUnit.NANOSECONDS.toMillis(delayNanos);
            assertThat(delayMillis)
                    .as("iteration %s, measured delay %s nanos, start at %s nanos, end at %s nanos",
                            iteration, delayNanos, startNanos, endNanos)
                    .isGreaterThanOrEqualTo(100L)
                    .isLessThan(200L);
        }
        finally {
            scheduler.dispose();
        }
    }
}
@Test
public void scanCapacity() {
    Scheduler scheduler = Schedulers.newElastic(2, Thread::new);
    Scheduler.Worker worker = scheduler.createWorker();

    try {
        // The elastic scheduler itself reports an unbounded capacity, while each
        // of its workers is backed by a single thread.
        assertThat(Scannable.from(scheduler).scan(Scannable.Attr.CAPACITY))
                .as("scheduler unbounded")
                .isEqualTo(Integer.MAX_VALUE);
        assertThat(Scannable.from(worker).scan(Scannable.Attr.CAPACITY))
                .as("worker capacity")
                .isEqualTo(1);
    }
    finally {
        worker.dispose();
        scheduler.dispose();
    }
}
}
@Test
public void metricsActivatedHasDistinctSchedulerIdTags() {
    // Schedulers that would produce the same toString get a #N disambiguating
    // suffix in their metrics scheduler-id tag; differently-configured ones do not.
    Schedulers.newParallel("A", 4);
    Schedulers.newParallel("A", 4); // same config as above -> expects "#1" suffix
    Schedulers.newParallel("A", 3);
    Schedulers.newSingle("B");
    // The elastic scheduler registers meters lazily, on first worker creation.
    Schedulers.newElastic("C").createWorker();

    assertThat(simpleMeterRegistry.getMeters()
                                  .stream()
                                  .map(m -> m.getId().getTag(SchedulerMetricDecorator.TAG_SCHEDULER_ID))
                                  .distinct())
            .containsOnly(
                    "parallel(4,\"A\")",
                    "parallel(4,\"A\")#1",
                    "parallel(3,\"A\")",
                    "single(\"B\")",
                    "elastic(\"C\")"
            );
}
@Test
public void allEnabled() {
    // Before enabling virtual time, the factories produce real schedulers.
    Assert.assertFalse(Schedulers.newParallel("") instanceof VirtualTimeScheduler);
    Assert.assertFalse(Schedulers.newElastic("") instanceof VirtualTimeScheduler);
    Assert.assertFalse(Schedulers.newSingle("") instanceof VirtualTimeScheduler);

    // After getOrSet(), every factory is routed to the virtual-time scheduler.
    VirtualTimeScheduler.getOrSet();
    Assert.assertTrue(Schedulers.newParallel("") instanceof VirtualTimeScheduler);
    Assert.assertTrue(Schedulers.newElastic("") instanceof VirtualTimeScheduler);
    Assert.assertTrue(Schedulers.newSingle("") instanceof VirtualTimeScheduler);

    // All factories must hand back the one-and-only installed instance.
    VirtualTimeScheduler installed = VirtualTimeScheduler.get();
    Assert.assertSame(Schedulers.newParallel(""), installed);
    Assert.assertSame(Schedulers.newElastic(""), installed);
    Assert.assertSame(Schedulers.newSingle(""), installed);
}
@Test
public void scanName() {
    // NOTE(review): this method is truncated in the current view — the
    // assertions on the schedulers' scanned NAME attributes are not visible here.
    // Three variants are set up: a named-factory elastic, a bare-ThreadFactory
    // elastic, and the shared cached elastic() instance.
    Scheduler withNamedFactory = Schedulers.newElastic("scanName", 1);
    Scheduler withBasicFactory = Schedulers.newElastic(1, Thread::new);
    Scheduler cached = Schedulers.elastic();
@Test
public void assertNextWithSubscribeOnDirectProcessor() {
    Scheduler scheduler = Schedulers.newElastic("test");
    try {
        DirectProcessor<Integer> processor = DirectProcessor.create();
        Mono<Integer> doAction = Mono.fromSupplier(() -> 22)
                                     .doOnNext(processor::onNext)
                                     .subscribeOn(scheduler);

        // The action emits 22, so assertNext expecting 23 must fail the verification.
        assertThatExceptionOfType(AssertionError.class)
                .isThrownBy(
                        StepVerifier.create(processor)
                                    .then(doAction::subscribe)
                                    .assertNext(v -> assertThat(v).isEqualTo(23))
                                    .thenCancel()
                                ::verify);
    }
    finally {
        // FIX: the original never disposed the scheduler, leaking its threads
        // after the test.
        scheduler.dispose();
    }
}
@Test
public void classicWithTimeout() {
    // FIX: the original created the elastic scheduler inline in subscribeOn(...)
    // and never disposed it, leaking its threads; capture and dispose it instead.
    Scheduler scheduler = Schedulers.newElastic("timeout");
    try {
        AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
        // The callable sleeps 2s, so the 100ms timeout fires and onErrorResume
        // substitutes the fallback value 1.
        Mono.fromCallable(() -> {
            try {
                TimeUnit.SECONDS.sleep(2L);
            }
            catch (InterruptedException ignore) {
                // intentionally ignored: the sleep is only there to outlast the timeout
            }
            return 0;
        })
            .timeout(Duration.ofMillis(100L))
            .onErrorResume(t -> Mono.fromCallable(() -> 1))
            .subscribeOn(scheduler)
            .subscribe(ts);

        ts.request(1);
        ts.await(Duration.ofMillis(400))
          .assertValues(1)
          .assertNoError()
          .assertComplete();
    }
    finally {
        scheduler.dispose();
    }
}
@Test
public void testOverride() throws InterruptedException {
    // Installing a custom factory must route every new* call to its schedulers.
    TestSchedulers custom = new TestSchedulers(true);
    Schedulers.setFactory(custom);
    Assert.assertEquals(custom.single, Schedulers.newSingle("unused"));
    Assert.assertEquals(custom.elastic, Schedulers.newElastic("unused"));
    Assert.assertEquals(custom.parallel, Schedulers.newParallel("unused"));

    // After resetting, the default factory must produce fresh instances again.
    Schedulers.resetFactory();
    Scheduler fresh = Schedulers.newSingle("unused");
    fresh.dispose();
    Assert.assertNotSame(custom.single, fresh);
}
public Flux<?> flux() {
    // Elastic scheduler with a 60s worker TTL and daemon threads, so these
    // threads do not by themselves prevent JVM exit.
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    // commitInterval(ZERO) disables interval-based auto-commit; commits are
    // driven explicitly by the concatMap(offset.commit()) below.
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
                        .receive()
                        // Process each topic-partition as its own ordered group,
                        // published onto the elastic scheduler.
                        .groupBy(m -> m.receiverOffset().topicPartition())
                        .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
                                                               .map(r -> processRecord(partitionFlux.key(), r))
                                                               // Sample the latest offset every 5s and commit
                                                               // sequentially to preserve commit order.
                                                               .sample(Duration.ofMillis(5000))
                                                               .concatMap(offset -> offset.commit()))
                        .doOnCancel(() -> close());
}

// NOTE(review): definition truncated in this view — the method body is not visible here.
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {