/**
 * Executes the supplier only if the read lock can be acquired without blocking.
 *
 * @param executeTask the task to run under the read lock
 * @return the task's result, or {@link Optional#empty()} if the lock could not
 *         be acquired (or if the task itself returned {@code null})
 */
@Override
public <R> Optional<R> execute(Supplier<R> executeTask) {
    long stamp = this.lock.tryReadLock();
    if (stamp == 0L) {
        // Write lock held elsewhere — skip the task entirely.
        return Optional.empty();
    }
    try {
        // ofNullable instead of of: a supplier may legitimately return null,
        // and Optional.of would throw NullPointerException in that case.
        // Trade-off: a null result is then indistinguishable from a skipped run.
        return Optional.ofNullable(executeTask.get());
    } finally {
        this.lock.unlock(stamp);
    }
}
lock.unlock(stamp);
/**
 * Returns the global maven configuration, lazily initialising it on first use.
 * Implements the standard StampedLock read-to-write upgrade: the cheap
 * in-place conversion is attempted first, falling back to release-and-reacquire.
 *
 * @return the maven configuration to use for the runtime.
 */
public static MavenConfiguration getMavenConfig() {
    long stamp = lock.readLock();
    try {
        if (mavenConfig == null) {
            // Try the cheap upgrade first; 0L means it failed and we must
            // release the read stamp and block for the write lock instead.
            long writeStamp = lock.tryConvertToWriteLock(stamp);
            if (writeStamp == 0L) {
                lock.unlockRead(stamp);
                stamp = lock.writeLock();
            } else {
                stamp = writeStamp;
            }
            // Re-check: another thread may have initialised the config while
            // we were waiting for the write lock.
            if (mavenConfig == null) {
                initialiseGlobalConfig();
            }
        }
        return mavenConfig;
    } finally {
        // unlock() releases whichever mode the final stamp represents.
        lock.unlock(stamp);
    }
}
/**
 * Runs the task under the read lock if it can be acquired without blocking;
 * when a writer currently holds the lock the task is silently skipped.
 */
@Override
public void execute(Runnable executeTask) {
    final long readStamp = this.lock.tryReadLock();
    if (readStamp == 0L) {
        return; // lock unavailable — skip the task
    }
    try {
        executeTask.run();
    } finally {
        this.lock.unlock(readStamp);
    }
}
lock.unlock(stamp);
/**
 * Executes the exception-throwing supplier only if the read lock can be
 * acquired without blocking.
 *
 * @param executeTask the task to run under the read lock
 * @param <R> result type
 * @param <E> exception type the task may throw
 * @return the task's result, or {@link Optional#empty()} if the lock could not
 *         be acquired (or if the task itself returned {@code null})
 * @throws E propagated unchanged from the task
 */
@Override
public <R, E extends Exception> Optional<R> execute(ExceptionSupplier<R, E> executeTask) throws E {
    long stamp = this.lock.tryReadLock();
    if (stamp == 0L) {
        // Write lock held elsewhere — skip the task entirely.
        return Optional.empty();
    }
    try {
        // ofNullable instead of of: Optional.of would throw
        // NullPointerException if the task legitimately returned null.
        return Optional.ofNullable(executeTask.get());
    } finally {
        this.lock.unlock(stamp);
    }
}
/**
 * Looks up the cache entry for {@code key}, creating and caching a new one on
 * a miss. The fast path runs under a read lock; a miss upgrades to a write
 * lock before mutating the cache.
 *
 * @param key the cache key
 * @return the existing or newly created entry
 */
private static CacheEntry getOrCreate(String key) {
    long stamp = lock.readLock();
    try {
        final CacheEntry entry = cache.get(key);
        if (entry != null) {
            return entry;
        }
        // Miss: upgrade to a write lock before inserting. NOTE(review):
        // convertToWriteLock is a helper defined elsewhere — presumably it
        // releases/reacquires when in-place conversion fails; verify it
        // always returns a valid write stamp.
        stamp = convertToWriteLock(stamp);
        // computeIfAbsent re-checks, so a racing creator's entry is reused.
        return cache.computeIfAbsent(key, CacheEntry::new);
    } finally {
        lock.unlock(stamp);
    }
}
/**
 * Runs the exception-throwing task under the read lock if it can be acquired
 * without blocking; otherwise the task is silently skipped.
 *
 * @throws E propagated unchanged from the task
 */
@Override
public <E extends Exception> void execute(ExceptionRunnable<E> executeTask) throws E {
    final long readStamp = this.lock.tryReadLock();
    if (readStamp == 0L) {
        return; // lock unavailable — skip the task
    }
    try {
        executeTask.run();
    } finally {
        this.lock.unlock(readStamp);
    }
}
/**
 * Accepts a batch for processing, parking the calling (upstream) thread while
 * the internal queue is full. Returns the time spent in this call so the
 * caller can account for back-pressure.
 *
 * @param ticket ordering ticket for this batch
 * @param batch the batch payload
 * @return nanoseconds spent enqueueing (including any time parked)
 */
@Override
public long receive( long ticket, T batch )
{
    long time = nanoTime();
    // Back-pressure: park until the queue drains below the cap. The unparking
    // side reads receiverThread, which we publish before parking.
    while ( queuedBatches.get() >= maxQueueLength )
    {
        PARK.park( receiverThread = Thread.currentThread() );
    }
    // It is of importance that all items in the queue at the same time agree on the number of processors. We take this lock in order to make sure that we
    // do not interfere with another thread trying to drain the queue in order to change the processor count.
    long lock = applyProcessorCount( stripingLock.readLock() );
    queuedBatches.incrementAndGet();
    // The Unit snapshots numberOfForkedProcessors under the striping lock so
    // every queued unit agrees on the processor count.
    Unit unit = new Unit( ticket, batch, numberOfForkedProcessors );

    // [old head] [unit]
    //      ^
    //      head
    Unit myHead = head.getAndSet( unit );

    // [old head] -next-> [unit]
    // Linking via getAndSet-then-set keeps the list append lock-free for
    // concurrent producers; applyProcessorCount may have returned a write
    // stamp, which unlock() releases either way.
    myHead.next = unit;
    stripingLock.unlock( lock );

    return nanoTime() - time;
}
/**
 * Clears the cache, logging how many entries were removed. Takes a read lock
 * first so an already-empty cache returns without contending for the write
 * lock; the size is re-read under the write lock before clearing.
 */
public static void clear() {
    int size;
    long stamp = lock.readLock();
    try {
        size = cache.size();
        if (size == 0) {
            // Nothing to clear — the finally block releases the read stamp.
            return;
        }
        // Upgrade before mutating; helper is defined elsewhere and is
        // presumed to return a valid write stamp.
        stamp = convertToWriteLock(stamp);
        // Re-read: entries may have been removed while upgrading.
        size = cache.size();
        cache.clear();
    } finally {
        lock.unlock(stamp);
    }
    // Logging happens outside the lock to keep the critical section short.
    if (size != 0 && logger.isDebugEnabled()) {
        if (size != 1) {
            logger.debug("Cleared: {} entries", size);
        } else {
            logger.debug("Cleared: 1 entry");
        }
    }
}
/**
 * Ensures the number of forked processors matches the current target,
 * upgrading the given read stamp to a write stamp when a change is needed.
 *
 * @param lock a read stamp on {@code stripingLock} held by the caller
 * @return the stamp the caller must eventually pass to
 *         {@code stripingLock.unlock(...)} — may be the original read stamp
 *         or a new write stamp if the processor count was adjusted
 */
private long applyProcessorCount( long lock )
{
    if ( numberOfForkedProcessors != targetNumberOfProcessors )
    {
        // Release-and-reacquire upgrade (no tryConvertToWriteLock here):
        // another thread may adjust the count in the gap, which is fine
        // because the work below is idempotent against the target.
        stripingLock.unlock( lock );
        lock = stripingLock.writeLock();
        // Changing processor count requires the queue to be fully drained so
        // all queued units agree on the count.
        awaitAllCompleted();
        int processors = targetNumberOfProcessors;
        while ( numberOfForkedProcessors < processors )
        {
            // Processor threads are created lazily and reused across
            // grow/shrink cycles — only instantiate missing slots.
            if ( forkedProcessors[numberOfForkedProcessors] == null )
            {
                forkedProcessors[numberOfForkedProcessors] =
                        new ForkedProcessor( numberOfForkedProcessors, tail.get() );
            }
            numberOfForkedProcessors++;
        }
        if ( numberOfForkedProcessors > processors )
        {
            numberOfForkedProcessors = processors;
            // Excess processors will notice that they are not needed right now, and will park until they are.
            // The most important thing here is that future Units will have a lower number of processor as expected max.
        }
    }
    return lock;
}
/**
 * Creates the step with an empty unit queue and spins up the initial set of
 * forked processors.
 *
 * @param control stage control for coordination with the surrounding stage
 * @param name human-readable step name
 * @param config supplies the maximum processor count
 * @param statsProviders optional additional stats providers
 */
protected ForkedProcessorStep( StageControl control, String name, Configuration config,
        StatsProvider... statsProviders )
{
    super( control, name, config, statsProviders );
    this.maxProcessors = config.maxNumberOfProcessors();
    this.forkedProcessors = new Object[this.maxProcessors];
    stripingLock = new StampedLock();

    // The queue starts with a no-op sentinel unit so head/tail are never null
    // and the append path in receive() needs no special empty-case handling.
    Unit noop = new Unit( -1, null, 0 );
    head = new AtomicReference<>( noop );
    tail = new AtomicReference<>( noop );

    // Create the initial processors; applyProcessorCount may upgrade the read
    // stamp to a write stamp, so its return value is what gets unlocked.
    stripingLock.unlock( applyProcessorCount( stripingLock.readLock() ) );
    downstreamSender = new CompletedBatchesSender( name + " [CompletedBatchSender]" );
    // Queue cap scales with processor count so wider configurations get
    // proportionally more buffering.
    maxQueueLength = 200 + maxProcessors;
}
/**
 * Enqueues the attempt into our underlying queue. Since it's expensive to
 * dynamically resize the queue, we have a separate rejection threshold which,
 * if less than 0 is ignored, but otherwise is the practical cap on the size
 * of the queue.
 *
 * @param delayedExecution the execution to enqueue
 * @return true if enqueued, false if rejected because the queue is at the cap
 */
private boolean tryEnqueueAttempt(DelayedExecution<T> delayedExecution) {
    int rejectionThreshold = queueRejectionThreshold.get();
    if (rejectionThreshold < 0) {
        // No cap configured — enqueue without any locking.
        return requestQueue.offer(delayedExecution);
    }
    long stamp = stampedLock.readLock();
    try {
        // Loop: the size check must hold while we actually offer, so we only
        // offer once we own the write stamp. If the in-place conversion fails
        // we block for the write lock and re-check the size on the next pass
        // (a convert on an already-held write stamp succeeds trivially).
        while (requestQueue.size() < rejectionThreshold) {
            long writeStamp = stampedLock.tryConvertToWriteLock(stamp);
            if (writeStamp != 0L) {
                stamp = writeStamp;
                return requestQueue.offer(delayedExecution);
            } else {
                stampedLock.unlock(stamp);
                stamp = stampedLock.writeLock();
            }
        }
        // Queue is at (or above) the cap — reject.
        return false;
    } finally {
        // Releases whichever mode the final stamp represents.
        stampedLock.unlock(stamp);
    }
}
/**
 * Demonstrates StampedLock's tryConvertToWriteLock: a read lock is upgraded
 * in place when possible, otherwise a full write lock is acquired.
 */
public static void main(String[] args) {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    StampedLock lock = new StampedLock();

    executor.submit(() -> {
        long stamp = lock.readLock();
        try {
            if (count == 0) {
                // Attempt the cheap in-place upgrade; 0L means it failed.
                stamp = lock.tryConvertToWriteLock(stamp);
                if (stamp == 0L) {
                    System.out.println("Could not convert to write lock");
                    // NOTE(review): the read stamp was already invalidated by
                    // the failed conversion attempt? No — a failed convert
                    // leaves the read lock held; blocking on writeLock() here
                    // while still holding the read stamp risks deadlock, and
                    // count == 0 is not re-checked after the wait. Acceptable
                    // for a single-writer demo; verify before reuse.
                    stamp = lock.writeLock();
                }
                count = 23;
            }
            System.out.println(count);
        } finally {
            // Releases read or write mode, whichever the final stamp holds.
            lock.unlock(stamp);
        }
    });

    ConcurrentUtils.stop(executor);
}
/**
 * Demonstrates StampedLock optimistic reading: the reader's stamp is
 * repeatedly validated while a concurrent writer holds the write lock.
 */
public static void main(String[] args) {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    StampedLock lock = new StampedLock();

    executor.submit(() -> {
        // FIX: tryOptimisticRead() does not acquire a lock — the returned
        // stamp must only be passed to validate(), never to unlock().
        // The previous code called lock.unlock(stamp) in a finally block,
        // which throws IllegalMonitorStateException for an optimistic stamp
        // (the exception was silently swallowed by the executor's Future).
        long stamp = lock.tryOptimisticRead();
        System.out.println("Optimistic Lock Valid: " + lock.validate(stamp));
        ConcurrentUtils.sleep(1);
        System.out.println("Optimistic Lock Valid: " + lock.validate(stamp));
        ConcurrentUtils.sleep(2);
        System.out.println("Optimistic Lock Valid: " + lock.validate(stamp));
    });

    executor.submit(() -> {
        long stamp = lock.writeLock();
        try {
            System.out.println("Write Lock acquired");
            ConcurrentUtils.sleep(2);
        } finally {
            // Write stamps DO represent a held lock and must be released.
            lock.unlock(stamp);
            System.out.println("Write done");
        }
    });

    ConcurrentUtils.stop(executor);
}
/**
 * Executes the supplier only if the read lock can be acquired without blocking.
 *
 * @param executeTask the task to run under the read lock
 * @return the task's result, or {@link Optional#empty()} if the lock could not
 *         be acquired (or if the task itself returned {@code null})
 */
@Override
public <R> Optional<R> execute(Supplier<R> executeTask) {
    long stamp = this.lock.tryReadLock();
    if (stamp == 0L) {
        // Write lock held elsewhere — skip the task entirely.
        return Optional.empty();
    }
    try {
        // ofNullable instead of of: a supplier may legitimately return null,
        // and Optional.of would throw NullPointerException in that case.
        return Optional.ofNullable(executeTask.get());
    } finally {
        this.lock.unlock(stamp);
    }
}
/**
 * Executes the exception-throwing supplier only if the read lock can be
 * acquired without blocking.
 *
 * @param executeTask the task to run under the read lock
 * @param <R> result type
 * @param <E> exception type the task may throw
 * @return the task's result, or {@link Optional#empty()} if the lock could not
 *         be acquired (or if the task itself returned {@code null})
 * @throws E propagated unchanged from the task
 */
@Override
public <R, E extends Exception> Optional<R> execute(ExceptionSupplier<R, E> executeTask) throws E {
    long stamp = this.lock.tryReadLock();
    if (stamp == 0L) {
        // Write lock held elsewhere — skip the task entirely.
        return Optional.empty();
    }
    try {
        // ofNullable instead of of: Optional.of would throw
        // NullPointerException if the task legitimately returned null.
        return Optional.ofNullable(executeTask.get());
    } finally {
        this.lock.unlock(stamp);
    }
}
/**
 * Runs the exception-throwing task under the read lock when it is free;
 * when a writer holds the lock the task is skipped without error.
 *
 * @throws E propagated unchanged from the task
 */
@Override
public <E extends Exception> void execute(ExceptionRunnable<E> executeTask) throws E {
    final long acquired = this.lock.tryReadLock();
    if (acquired != 0L) {
        try {
            executeTask.run();
        } finally {
            this.lock.unlock(acquired);
        }
    }
}
/**
 * Returns whether the key is present in either the key's segment map or the
 * shared sorted map, holding that segment's read lock for the whole lookup.
 */
@Override
public boolean containsKey(Object key) {
    final int segmentIndex = getSegment(key);
    final long stamp = segmentLocks[segmentIndex].readLock();
    try {
        if (segments[segmentIndex].containsKey(key)) {
            return true;
        }
        return sorted.containsKey(key);
    } finally {
        segmentLocks[segmentIndex].unlock(stamp);
    }
}
/**
 * Starts the underlying manager, first releasing any held lifecycle lock
 * stamp, and resets statistics when they are present.
 */
@Override
public synchronized void start() {
    // Release the lifecycle stamp acquired elsewhere (presumably by stop()).
    // NOTE(review): the stamp holder is not cleared after unlocking — if
    // start() can run twice without the stamp being re-acquired in between,
    // the second unlock would throw IllegalMonitorStateException. Verify
    // against the stop() path.
    this.lifecycleStamp.ifPresent(stamp -> this.lifecycleLock.unlock(stamp));
    this.manager.start();
    if (this.statistics != null) {
        this.statistics.reset();
    }
}