/**
 * Returns the shared inner queue, atomically creating it on first use.
 * <p>Several threads may race to initialize; the CAS on the holder ensures
 * exactly one instance wins and every caller observes that same instance.
 */
SpscLinkedArrayQueue<R> getOrCreateQueue() {
    for (;;) {
        SpscLinkedArrayQueue<R> existing = queue.get();
        if (existing != null) {
            return existing;
        }
        SpscLinkedArrayQueue<R> fresh = new SpscLinkedArrayQueue<R>(Flowable.bufferSize());
        if (queue.compareAndSet(null, fresh)) {
            return fresh;
        }
        // lost the race; loop around and pick up the winner's queue
    }
}
/**
 * Lazily creates the buffering queue on first access.
 * <p>NOTE(review): this init is not thread-safe; presumably it is only
 * reached from the serialized drain path — confirm against callers.
 */
SimplePlainQueue<T> getOrCreateQueue() {
    SimplePlainQueue<T> current = queue;
    if (current != null) {
        return current;
    }
    current = new SpscArrayQueue<T>(bufferSize());
    queue = current;
    return current;
}
// Combines the collected inner Publishers by zipping them with the shared
// zipper function, not delaying errors, using the default prefetch amount.
@Override
public Publisher<? extends R> apply(List<Publisher<? extends T>> list) {
    return Flowable.zipIterable(list, zipper, false, Flowable.bufferSize());
}
}
// Returns the shared inner queue, creating it atomically on first use.
// The CAS guarantees only one instance wins when threads race; a loser
// loops and returns the winner's queue.
SpscLinkedArrayQueue<R> getOrCreateQueue() {
    for (;;) {
        SpscLinkedArrayQueue<R> current = queue.get();
        if (current != null) {
            return current;
        }
        current = new SpscLinkedArrayQueue<R>(Flowable.bufferSize());
        if (queue.compareAndSet(null, current)) {
            return current;
        }
    }
}
// Maps each value to its remainder modulo (bufferSize + 2), yielding a
// value range slightly wider than one prefetch buffer.
@Override
public Object apply(Integer i) {
    return i % (Flowable.bufferSize() + 2);
}
})
// Wires up the merge state for the given downstream Subscriber.
MergeWithObserver(Subscriber<? super T> downstream) {
    this.downstream = downstream;
    this.mainSubscription = new AtomicReference<Subscription>();
    this.otherObserver = new OtherObserver<T>(this);
    this.error = new AtomicThrowable();
    this.requested = new AtomicLong();
    // prefetch = default buffer size; limit = 75% of it — presumably the
    // consumption point at which more items are re-requested upstream
    // (standard RxJava stable-prefetch pattern) — confirm in the drain loop.
    this.prefetch = bufferSize();
    this.limit = prefetch - (prefetch >> 2);
}
// Initializes all per-subscription state for merging the main source with
// the 'other' source into the given downstream Subscriber.
MergeWithObserver(Subscriber<? super T> downstream) {
    this.downstream = downstream;
    this.mainSubscription = new AtomicReference<Subscription>();
    this.otherObserver = new OtherObserver<T>(this);
    this.error = new AtomicThrowable();
    this.requested = new AtomicLong();
    // limit is prefetch minus a quarter (i.e. 75%) — the usual replenish
    // threshold for stable-prefetch operators.
    this.prefetch = bufferSize();
    this.limit = prefetch - (prefetch >> 2);
}
/**
 * Takes a Publisher and prepares to consume it on as many 'rails' as there
 * are available processors, in a round-robin fashion.
 * @param <T> the value type
 * @param source the source Publisher
 * @return the ParallelFlowable instance
 */
@CheckReturnValue
public static <T> ParallelFlowable<T> from(@NonNull Publisher<? extends T> source) {
    int parallelism = Runtime.getRuntime().availableProcessors();
    return from(source, parallelism, Flowable.bufferSize());
}
/**
 * Takes a Publisher and prepares to consume it on {@code parallelism}
 * number of 'rails' in a round-robin fashion.
 * @param <T> the value type
 * @param source the source Publisher
 * @param parallelism the number of parallel rails
 * @return the new ParallelFlowable instance
 */
@CheckReturnValue
public static <T> ParallelFlowable<T> from(@NonNull Publisher<? extends T> source, int parallelism) {
    int prefetch = Flowable.bufferSize();
    return from(source, parallelism, prefetch);
}
/** Verifies an Observable range delivers every value without backpressure. */
@Test
public void testNoBackpressure() {
    int count = Flowable.bufferSize() * 2 + 1;
    ArrayList<Long> expected = new ArrayList<Long>(count);
    for (long value = 1; value <= count; value++) {
        expected.add(value);
    }
    Observable<Long> source = Observable.rangeLong(1, expected.size());
    TestObserver<Long> to = new TestObserver<Long>();
    source.subscribe(to);
    to.assertValueSequence(expected);
    to.assertTerminated();
}
@Override
public void onNext(Integer t) {
    super.onNext(t);
    if (t == 1) {
        // On the very first element, synchronously push bufferSize - 1
        // further items (values 2..bufferSize) into the processor.
        for (int i = 0; i < Flowable.bufferSize() - 1; i++) {
            pp.onNext(i + 2);
        }
    }
}
});
@Override
public Flowable<Integer> apply(Integer t) {
    // Each inner source emits two buffers' worth of items, counting every
    // emission; hide() masks the source's identity so no fusion applies.
    return Flowable.range(1, Flowable.bufferSize() * 2)
    .doOnNext(new Consumer<Integer>() {
        @Override
        public void accept(Integer t) {
            count.getAndIncrement();
        }
    }).hide();
}
}).subscribe(ts);
@Test
public void testSkipLastWithBackpressure() {
    // With n = bufferSize: range emits 2n items and skipLast drops the
    // final n + 10, so exactly n - 10 values must reach the subscriber.
    Flowable<Integer> f = Flowable.range(0, Flowable.bufferSize() * 2).skipLast(Flowable.bufferSize() + 10);
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    f.observeOn(Schedulers.computation()).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
    assertEquals((Flowable.bufferSize()) - 10, ts.valueCount());
}
@Test
public void testObserveOn() {
    // Request slightly more than two buffers' worth of items through
    // observeOn, then verify all arrived and the source did not run far
    // ahead of demand (bounded by four buffers).
    int num = (int) (Flowable.bufferSize() * 2.1);
    AtomicInteger c = new AtomicInteger();
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    incrementingIntegers(c).observeOn(Schedulers.computation()).take(num).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
    System.out.println("testObserveOn => Received: " + ts.valueCount() + " Emitted: " + c.get());
    assertEquals(num, ts.valueCount());
    // backpressure check: emissions must stay within 4 buffers of demand
    assertTrue(c.get() < Flowable.bufferSize() * 4);
}
@Test
public void testBackpressure() {
    // amb picks one of two identical ranges; a slow consumer behind
    // observeOn must still receive all 2 * bufferSize items.
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    Flowable.range(0, Flowable.bufferSize() * 2)
    .ambWith(Flowable.range(0, Flowable.bufferSize() * 2))
    .observeOn(Schedulers.computation()) // observeOn has a backpressured RxRingBuffer
    .delay(1, TimeUnit.MICROSECONDS) // make it a slightly slow consumer
    .subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
    assertEquals(Flowable.bufferSize() * 2, ts.values().size());
}
@Test public void testInnerBackpressureWithAlignedBoundaries() { TestSubscriber<Integer> ts = new TestSubscriber<Integer>(); Flowable.range(0, Flowable.bufferSize() * 2) .concatWith(Flowable.range(0, Flowable.bufferSize() * 2)) .observeOn(Schedulers.computation()) // observeOn has a backpressured RxRingBuffer .subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); assertEquals(Flowable.bufferSize() * 4, ts.valueCount()); }
@Test
public void testInnerBackpressureWithoutAlignedBoundaries() {
    // Same as the aligned test but each range overshoots the buffer
    // boundary by 10, exercising partial-buffer handoffs in concat.
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    Flowable.range(0, (Flowable.bufferSize() * 2) + 10)
    .concatWith(Flowable.range(0, (Flowable.bufferSize() * 2) + 10))
    .observeOn(Schedulers.computation()) // observeOn has a backpressured RxRingBuffer
    .subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
    assertEquals((Flowable.bufferSize() * 4) + 20, ts.valueCount());
}
@Test(timeout = 500)
public void testWithObserveOn() throws InterruptedException {
    // onBackpressureDrop lets the fast range overwhelm the io() consumer
    // without MissingBackpressureException; only termination is asserted.
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    Flowable.range(0, Flowable.bufferSize() * 10).onBackpressureDrop().observeOn(Schedulers.io()).subscribe(ts);
    ts.awaitTerminalEvent();
}
@Test
public void testBackpressure2() {
    // takeLast buffers the final 4 * bufferSize of 100000 items; a slow
    // mapper downstream of observeOn must still receive all of them.
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    Flowable.range(1, 100000).takeLast(Flowable.bufferSize() * 4)
    .observeOn(Schedulers.newThread()).map(newSlowProcessor()).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
    assertEquals(Flowable.bufferSize() * 4, ts.valueCount());
}
/** sequenceEqual over the same multi-buffer async source must report true. */
@Test
public void longSequenceEquals() {
    int count = Flowable.bufferSize() * 4;
    Flowable<Integer> source = Flowable.range(1, count).subscribeOn(Schedulers.computation());
    Flowable.sequenceEqual(source, source)
            .test()
            .awaitDone(5, TimeUnit.SECONDS)
            .assertResult(true);
}