@Test
public void serializedSinkSingleProducer() throws Exception {
	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(false)
			.build();

	// A non-shared (single-producer) processor hands out a serialized sink,
	// and the serialized wrapper must survive each fluent call.
	FluxSink<Integer> currentSink = processor.sink();
	assertThat(currentSink).isInstanceOf(SerializedSink.class);

	currentSink = currentSink.next(1);
	assertThat(currentSink).isInstanceOf(SerializedSink.class);

	currentSink = currentSink.onRequest(n -> {});
	assertThat(currentSink).isInstanceOf(SerializedSink.class);
}
@Test
public void testCustomRequestTaskThreadShare() {
	// Verifies that a custom requestTaskExecutor is actually used to run the
	// request task: the thread it spawns carries the factory-assigned name.
	String expectedName = "topicProcessorRequestTaskShare";
	//NOTE: the below single executor should not be used usually as requestTask assumes it immediately gets executed
	ExecutorService customTaskExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, expectedName));
	TopicProcessor<Object> processor = TopicProcessor.builder().share(true)
			.executor(Executors.newCachedThreadPool())
			.requestTaskExecutor(customTaskExecutor)
			.bufferSize(8)
			.waitStrategy(WaitStrategy.liteBlocking())
			.autoCancel(true)
			.build();

	processor.requestTask(Operators.cancelledSubscription());

	// Snapshot the live threads BEFORE shutting anything down, otherwise the
	// named thread may already have terminated and the assertion would miss it.
	// NOTE(review): Thread.activeCount()/enumerate is inherently racy — assumes
	// no unrelated thread churn between the two calls.
	Thread[] threads = new Thread[Thread.activeCount()];
	Thread.enumerate(threads);

	//cleanup to avoid visibility in other tests
	customTaskExecutor.shutdownNow();
	processor.forceShutdown();

	Condition<Thread> customRequestTaskThread = new Condition<>(
			thread -> thread != null && expectedName.equals(thread.getName()),
			"a thread named \"%s\"", expectedName);

	Assertions.assertThat(threads)
	          .haveExactly(1, customRequestTaskThread);
}
@Test
public void nonSerializedSinkMultiProducer() throws Exception {
	// A shared (multi-producer) processor already supports concurrent onNext,
	// so its sink does not need the serializing wrapper.
	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.build();

	FluxSink<Integer> rawSink = processor.sink();
	assertThat(rawSink).isNotInstanceOf(SerializedSink.class);
	assertThat(rawSink.next(1)).isNotInstanceOf(SerializedSink.class);
}
/**
 * Create a new shared TopicProcessor with the given backlog size, a blocking
 * wait strategy and auto-cancellation enabled.
 * <p>
 * A shared processor accepts concurrent {@code onNext} calls, making it
 * suitable for fan-in from multiple publishing threads.
 * <p>
 * An implicit cached thread pool is created; its threads are named after the
 * supplied {@code name}.
 *
 * @param name name assigned to the threads of the implicitly created cached
 * ExecutorService
 * @param bufferSize backlog size used to mitigate slow subscribers
 * @param <E> type of the processed signals
 * @return a fresh processor
 */
public static <E> TopicProcessor<E> share(String name, int bufferSize) {
	return TopicProcessor.<E>builder()
			.name(name)
			.bufferSize(bufferSize)
			.share(true)
			.build();
}
@Test(timeout = 5_000)
public void testBufferSize1Shared() throws Exception {
	// A shared processor with the minimal buffer size (1) must still deliver
	// a cached element to a large number of simultaneous subscribers.
	TopicProcessor<String> broadcast = TopicProcessor.<String>builder()
			.name("share-name")
			.bufferSize(1)
			.autoCancel(true)
			.share(true)
			.build();

	int simultaneousSubscribers = 3000;
	CountDownLatch latch = new CountDownLatch(simultaneousSubscribers);
	// Fix: use a dedicated scheduler instead of the shared Schedulers.single()
	// so it can be safely disposed without impacting other tests.
	Scheduler scheduler = Schedulers.newSingle("bufferSize1Shared");
	try {
		FluxSink<String> sink = broadcast.sink();
		Flux<String> flux = broadcast.filter(Objects::nonNull)
		                             .publishOn(scheduler)
		                             .cache(1);

		for (int i = 0; i < simultaneousSubscribers; i++) {
			flux.subscribe(s -> latch.countDown());
		}
		sink.next("data");

		assertThat(latch.await(4, TimeUnit.SECONDS))
				.overridingErrorMessage("Data not received")
				.isTrue();
	}
	finally {
		// Fix: previously neither the scheduler nor the processor was cleaned
		// up, leaking threads into subsequent tests.
		scheduler.dispose();
		broadcast.forceShutdown();
	}
}
@Test
public void serializedSinkMultiProducerWithOnRequest() throws Exception {
	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.build();

	// Even on a shared processor, registering an onRequest callback forces the
	// returned sink (and sinks obtained from it inside the callback) to be
	// serialized, since the callback may race with producer threads.
	FluxSink<Integer> sink = processor.sink();
	FluxSink<Integer> serializedSink = sink.onRequest(n -> {
		FluxSink<Integer> inner = sink.next(1);
		assertThat(inner).isInstanceOf(SerializedSink.class);
		inner.next(2);
	});
	assertThat(serializedSink).isInstanceOf(SerializedSink.class);

	StepVerifier.create(processor)
	            .thenRequest(5)
	            .expectNext(1, 2)
	            .thenCancel()
	            .verify();
}
}
/**
 * Create a new shared TopicProcessor using the passed backlog size, with a
 * blocking wait strategy and auto-cancellation.
 * <p>
 * A shared processor authorizes concurrent {@code onNext} calls and is suited
 * for multi-threaded publishers that will fan-in data.
 * <p>
 * A new cached ExecutorService is created implicitly and uses the passed name
 * to qualify its threads.
 *
 * @param name thread name prefix for the implicitly created ExecutorService
 * @param bufferSize backlog size to mitigate slow subscribers
 * @param <E> type of processed signals
 * @return a fresh processor
 */
public static <E> TopicProcessor<E> share(String name, int bufferSize) {
	return TopicProcessor.<E>builder()
			.share(true)
			.name(name)
			.bufferSize(bufferSize)
			.build();
}
@Test
public void shareOverrideAll() {
	// Override every builder knob at once and verify each is honored.
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	ExecutorService requestExecutor = Executors.newSingleThreadExecutor();
	int backlog = 1024;
	WaitStrategy strategy = WaitStrategy.busySpin();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.requestTaskExecutor(requestExecutor)
			.bufferSize(backlog)
			.waitStrategy(strategy)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, null, backlog, strategy, false,
			mainExecutor, requestExecutor);
}
@Test
public void shareOverrideExecutorBufferSizeWaitStrategyAutoCancel() {
	// Override executor, buffer size, wait strategy and autoCancel; the
	// request-task executor stays at its default (asserted as null).
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	int backlog = 1024;
	WaitStrategy strategy = WaitStrategy.busySpin();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.bufferSize(backlog)
			.waitStrategy(strategy)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, null, backlog, strategy, false,
			mainExecutor, null);
}
@Test
public void shareDefaultExecutorOverrideAll() {
	// Keep the default executor but override name, buffer size, wait strategy
	// and autoCancel.
	String customName = "nameOverride";
	int backlog = 1024;
	WaitStrategy strategy = WaitStrategy.busySpin();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.name(customName)
			.bufferSize(backlog)
			.waitStrategy(strategy)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, customName, backlog, strategy, false,
			null, null);
}
@Test
public void shareOverrideExecutorBufferSizeWaitStrategy() {
	// Override executor, buffer size and wait strategy; autoCancel keeps its
	// default (asserted as null).
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	int backlog = 1024;
	WaitStrategy strategy = WaitStrategy.busySpin();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.bufferSize(backlog)
			.waitStrategy(strategy)
			.build();

	assertProcessor(processor, true, null, backlog, strategy, null,
			mainExecutor, null);
}
@Test
public void shareOverrideExecutorBufferSizeAutoCancel() {
	// Override executor, buffer size and autoCancel only.
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	int backlog = 1024;

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.bufferSize(backlog)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, null, backlog, null, false,
			mainExecutor, null);
}
@Test
public void shareOverrideNameBufferSizeWaitStrategy() {
	// Override name, buffer size and wait strategy only.
	String customName = "nameOverride";
	int backlog = 1024;
	WaitStrategy strategy = WaitStrategy.busySpin();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.name(customName)
			.bufferSize(backlog)
			.waitStrategy(strategy)
			.build();

	assertProcessor(processor, true, customName, backlog, strategy, null,
			null, null);
}
@Test
public void shareOverrideExecutorBufferSize() {
	// Override executor and buffer size only.
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	int backlog = 1024;

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.bufferSize(backlog)
			.build();

	assertProcessor(processor, true, null, backlog, null, null,
			mainExecutor, null);
}
@Test
public void shareOverrideAutoCancel() {
	// Override only autoCancel; everything else keeps its default.
	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, null, null, null, false, null, null);
}
@Test
public void shareOverrideNameBufferSize() {
	// Override name and buffer size only.
	String customName = "nameOverride";
	int backlog = 1024;

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.name(customName)
			.bufferSize(backlog)
			.build();

	assertProcessor(processor, true, customName, backlog, null, null, null, null);
}
@Test
public void shareOverrideExecutor() {
	// Override only the main executor.
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.build();

	assertProcessor(processor, true, null, null, null, null, mainExecutor, null);
}
@Test
public void shareOverrideExecutorAutoCancel() {
	// Override the main executor and autoCancel only.
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.executor(mainExecutor)
			.autoCancel(false)
			.build();

	assertProcessor(processor, true, null, null, null, false, mainExecutor, null);
}
@Test public void createSmokeTest() { //this build sequence has been reported as throwing an exception // with JDK9 (see https://github.com/reactor/reactor-core/issues/881) TopicProcessor.builder().share(true).build(); }