/**
 * Atomically adds {@code delta} to the index and returns the updated value with
 * the sign bit cleared, so callers always observe a non-negative index even if
 * the underlying counter wraps past {@link Integer#MAX_VALUE}.
 *
 * @param delta amount to add; must be non-negative
 * @return the updated index, masked to a non-negative value
 * @throws IllegalArgumentException if {@code delta} is negative
 */
public final int addAndGet(int delta) {
    if (delta < 0) {
        throw new IllegalArgumentException("delta " + delta + " < 0");
    }
    int updated = indexUpdater.addAndGet(this, delta);
    // Clearing the sign bit keeps the result non-negative after overflow.
    return updated & Integer.MAX_VALUE;
}
/**
 * Atomically advances the index by {@code delta}, rejecting negative deltas,
 * and returns the new value masked to be non-negative (the mask discards the
 * sign bit should the counter wrap around).
 *
 * @param delta non-negative increment to apply
 * @return the post-increment index with the sign bit cleared
 * @throws IllegalArgumentException if {@code delta} is negative
 */
public final int addAndGet(int delta) {
    if (delta < 0) {
        throw new IllegalArgumentException("delta " + delta + " < 0");
    }
    // Mask with Integer.MAX_VALUE so wrap-around never yields a negative index.
    return indexUpdater.addAndGet(this, delta) & Integer.MAX_VALUE;
}
/**
 * Leaves the serialized drain loop by subtracting the work already handled
 * from the work-in-progress counter.
 *
 * @param missed number of passes already processed
 * @return the remaining work-in-progress count after the subtraction
 */
@Override
public int leave(int missed) {
    // A non-zero result means more work arrived while draining.
    return WIP.addAndGet(this, -missed);
}
/**
 * Decrements the work-in-progress counter by the amount of work that was just
 * drained and reports what is still outstanding.
 *
 * @param missed the number of drain iterations completed
 * @return the updated work-in-progress counter
 */
@Override
public int leave(int missed) {
    int remaining = WIP.addAndGet(this, -missed);
    return remaining;
}
/** Atomically bumps the reference count by one; the updated value is not used here. */
public void incRefCount() {
    // incrementAndGet is the idiomatic equivalent of addAndGet(this, 1).
    refCountUpdater.incrementAndGet(this);
}
/**
 * Removes the mapping for {@code entry} and decrements the tracked value count
 * accordingly: by the collection's size when the stored value was a
 * {@link Collection}, otherwise by one.
 *
 * @param entry the key whose mapping should be removed
 * @return the value previously associated with {@code entry}, or {@code null}
 *         if there was no mapping
 */
public Object remove(RegionEntry entry) {
    Object removed = this.map.remove(entry);
    if (removed == null) {
        return null;
    }
    int count = removed instanceof Collection ? ((Collection) removed).size() : 1;
    atomicUpdater.addAndGet(this, -count);
    return removed;
}
/**
 * Shrinks the endpoint's queue-size counter to account for what was sent:
 * one slot when a single tracked command exists, otherwise the whole batch.
 */
void dequeue() {
    // Ternary short-circuits, so sentCommands is only touched in the batch case.
    int delta = sentCommand != null ? -1 : -sentCommands.size();
    QUEUE_SIZE.addAndGet(endpoint, delta);
}
/**
 * Compacts {@code threadLocalDataList} by removing entries whose
 * {@code WeakReference} referent has been garbage collected.
 * Uses a two-phase in-place compaction: live references are shifted to the
 * front of the list, then the now-dead tail is trimmed from the end.
 * NOTE(review): assumes the caller holds the protecting lock — see the
 * original inline comment; confirm against call sites.
 */
private void cleanupThreadLocalData() {
    // Called under lock, and only when at least quarter of the capacity has been collected.
    int size = threadLocalDataList.size();
    if (reclaimedThreadLocals > (size / 4)) {
        // j tracks the write cursor: the count of live entries kept so far.
        int j = 0;
        for (int i = 0; i < size; i++) {
            WeakReference<ThreadLocalData> ref = threadLocalDataList.get(i);
            if (ref.get() != null) {
                // Still alive: compact it toward the front of the list.
                threadLocalDataList.set(j++, ref);
            }
        }
        // Trim the stale tail, removing from the back so no elements shift.
        for (int i = size - 1; i >= j; i--) {
            // A tail remove is inlined to a range change check and a decrement
            threadLocalDataList.remove(i);
        }
        // Atomically credit back the (size - j) slots just reclaimed.
        reclaimedThreadLocalsUpdater.addAndGet(this, -1 * (size - j));
    }
}
/**
 * Replaces the value stored for {@code entry} and adjusts the aggregate value
 * count by the difference between the new and old number of values (a
 * {@link Collection} counts as its size, any other value counts as one).
 *
 * @param entry  the key whose value is replaced
 * @param values the new value (single object or a collection of values)
 */
public void replace(RegionEntry entry, Object values) {
    int previousCount = getNumValues(entry);
    this.map.put(entry, values);
    int newCount = values instanceof Collection ? ((Collection) values).size() : 1;
    atomicUpdater.addAndGet(this, newCount - previousCount);
}
}
@Override public void clear() { headLock.lock(); try { int size = SIZE_UPDATER.get(this); for (int i = 0; i < size; i++) { data[headIndex.value] = null; headIndex.value = (headIndex.value + 1) & (data.length - 1); } if (SIZE_UPDATER.addAndGet(this, -size) > 0) { // There are still entries to consume isNotEmpty.signal(); } } finally { headLock.unlock(); } }
/**
 * Credits {@code delta} permits and, once the accumulated total reaches the
 * refill threshold while not paused, atomically swaps the counter to zero and
 * forwards the accumulated permits to the broker over {@code currentCnx}.
 *
 * @param currentCnx connection used to send the flow permits
 * @param delta      number of permits to make available
 */
private void increaseAvailablePermits(ClientCnx currentCnx, int delta) {
    int available = AVAILABLE_PERMITS_UPDATER.addAndGet(this, delta);
    for (;;) {
        // Stop once below the refill threshold or while the consumer is paused.
        if (available < receiverQueueRefillThreshold || paused) {
            break;
        }
        if (AVAILABLE_PERMITS_UPDATER.compareAndSet(this, available, 0)) {
            sendFlowPermitsToBroker(currentCnx, available);
            break;
        }
        // Lost the CAS race against a concurrent update; reload and retry.
        available = AVAILABLE_PERMITS_UPDATER.get(this);
    }
}
@Override public int drainTo(Collection<? super T> c, int maxElements) { headLock.lock(); try { int drainedItems = 0; int size = SIZE_UPDATER.get(this); while (size > 0 && drainedItems < maxElements) { T item = data[headIndex.value]; data[headIndex.value] = null; c.add(item); headIndex.value = (headIndex.value + 1) & (data.length - 1); --size; ++drainedItems; } if (SIZE_UPDATER.addAndGet(this, -drainedItems) > 0) { // There are still entries to consume isNotEmpty.signal(); } return drainedItems; } finally { headLock.unlock(); } }
/**
 * Worker loop: repeatedly drains tasks from the shared queue, executing up to
 * the snapshotted work-in-progress count per pass, and exits either when
 * terminated or when the WIP counter drops to zero after accounting for the
 * work done.
 */
@Override
public void run() {
    final Queue<ExecutorTrackedRunnable> q = queue;
    for (; ; ) {
        // e counts tasks executed this pass; r is a snapshot of scheduled work.
        int e = 0;
        int r = wip;
        while (e != r) {
            // Check termination before each task so shutdown is honored promptly.
            if (terminated) {
                return;
            }
            ExecutorTrackedRunnable task = q.poll();
            if (task == null) {
                // Queue drained early: a producer bumped wip before enqueueing.
                break;
            }
            task.run();
            e++;
        }
        // Full pass completed and termination requested: stop without re-looping.
        if (e == r && terminated) {
            return;
        }
        // Subtract the executed count; zero means no new work arrived meanwhile.
        if (WIP.addAndGet(this, -e) == 0) {
            break;
        }
    }
}
void drain() { if (WIP.getAndIncrement(this) != 0) { return; } int missed = 1; for (; ; ) { Subscriber<? super T> a = actual; //noinspection ConstantConditions if (a != null) { innerDrain(a); return; } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } }
private void writeToChannelAndFlush(Collection<? extends RedisCommand<?, ?, ?>> commands) { QUEUE_SIZE.addAndGet(this, commands.size()); if (reliability == Reliability.AT_MOST_ONCE) { // cancel on exceptions and remove from queue, because there is no housekeeping for (RedisCommand<?, ?, ?> command : commands) { channelWrite(command).addListener(AtMostOnceWriteListener.newInstance(this, command)); } } if (reliability == Reliability.AT_LEAST_ONCE) { // commands are ok to stay within the queue, reconnect will retrigger them for (RedisCommand<?, ?, ?> command : commands) { channelWrite(command).addListener(RetryListener.newInstance(this, command)); } } channelFlush(); }
/**
 * Serializes drain and subscription-event handling behind the WIP counter so
 * only one thread runs the loop at a time.
 *
 * @return {@code true} when the work was deferred to the thread already in the
 *         loop or when {@code onSubscriptionLoop()} consumed the event;
 *         {@code false} when this thread fully drained the outstanding work
 */
boolean serializeDrainAndSubscriptionEvent() {
    int missed = WIP.incrementAndGet(this);
    if (missed != 1) {
        // Another thread is inside the loop; it will observe this increment.
        return true;
    }
    for (; ; ) {
        // Subscription events take priority; a true result ends the loop early.
        if(onSubscriptionLoop()){
            return true;
        }
        // Only ASYNC fusion drains here — confirm other modes drain elsewhere.
        if(establishedFusionMode == Fuseable.ASYNC) {
            drainAsyncLoop();
        }
        // Settle the counter; a zero result means no new work arrived meanwhile.
        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
            break;
        }
    }
    return false;
}
/**
 * Serialized drain: the thread that moves WIP from 0 to 1 owns the loop.
 * Once a subscriber is available, dispatches to the fused or regular drain
 * path depending on {@code outputFused}; otherwise retries while new work
 * keeps arriving.
 */
void drain() {
    if (WIP.getAndIncrement(this) != 0) {
        // Someone else is draining; they will pick up this signal.
        return;
    }
    int missed = 1;
    while (true) {
        Subscriber<? super T> subscriber = actual;
        if (subscriber != null) {
            if (outputFused) {
                drainFused(subscriber);
            } else {
                drainRegular(subscriber);
            }
            return;
        }
        // Subscriber not set yet: release ownership if no new work appeared.
        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
            return;
        }
    }
}
/**
 * Serialized drain loop guarded by the WIP counter: only the 0-to-1
 * transition enters. When a subscriber exists, routes to the fused or regular
 * drain depending on {@code enabledFusion}; otherwise loops until no
 * unaccounted work remains.
 */
void drain() {
    if (WIP.getAndIncrement(this) != 0) {
        // The active drainer will handle the newly signalled work.
        return;
    }
    int missed = 1;
    while (true) {
        Subscriber<? super T> subscriber = actual;
        if (subscriber != null) {
            if (enabledFusion) {
                drainFused(subscriber);
            } else {
                drainRegular(subscriber);
            }
            return;
        }
        // No subscriber yet: settle WIP and stop if nothing new arrived.
        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
            return;
        }
    }
}
/**
 * Drain loop for a backpressure-fused downstream: emits {@code onNext(null)}
 * as a "poll me" signal, discards the queue on cancellation, and terminates
 * with the recorded error or completion once {@code done} is observed.
 */
void runBackfused() {
    int missed = 1;
    while (true) {
        if (cancelled) {
            Operators.onDiscardQueueWithClear(queue, actual.currentContext(), null);
            return;
        }
        // Read done BEFORE signalling, so a concurrent completion is not missed.
        boolean wasDone = done;
        actual.onNext(null);
        if (wasDone) {
            Throwable ex = error;
            if (ex != null) {
                doError(actual, ex);
            } else {
                doComplete(actual);
            }
            return;
        }
        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
            return;
        }
    }
}
/**
 * Backfused drain loop: signals the fused downstream with {@code onNext(null)}
 * so it polls the queue itself, clears and discards the queue on cancel, and
 * finishes with {@code doError}/{@code doComplete} after {@code done} is seen.
 */
void runBackfused() {
    int missed = 1;
    for (;;) {
        if (cancelled) {
            Operators.onDiscardQueueWithClear(queue, actual.currentContext(), null);
            return;
        }
        // Snapshot done before the signal to preserve the original ordering.
        boolean finished = done;
        actual.onNext(null);
        if (finished) {
            Throwable failure = error;
            if (failure != null) {
                doError(actual, failure);
            } else {
                doComplete(actual);
            }
            return;
        }
        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
            return;
        }
    }
}