/**
 * Returns how many elements have been added to this queue via one of the
 * {@link #add(ByteBuf)} methods and not yet removed.
 *
 * @return the number of elements in this queue.
 */
protected final int size() {
    final int elementCount = bufAndListenerPairs.size();
    return elementCount;
}
/**
 * Indicates whether this buffer can accept further writes, i.e. whether the
 * count of pending elements has not yet risen above the high-water mark.
 *
 * @return whether the buffer is writable
 */
public synchronized boolean isWritable() {
    final int pendingCount = pending.size();
    return pendingCount <= highWaterMark;
}
/**
 * Reports the current element count of the buffer.
 *
 * @return the actual number of elements in the buffer
 */
public synchronized int size() {
    final int count = pending.size();
    return count;
}
}
/**
 * Reports how many elements are currently held by this queue, counting those
 * inserted through one of the {@link #add(ByteBuf)} methods.
 *
 * @return the number of elements in this queue.
 */
protected final int size() {
    final int count = bufAndListenerPairs.size();
    return count;
}
/**
 * Gets the number of Runnables currently queued.
 */
public int numQueuedRunnables() {
    final int queued;
    // Read the size under the queue's own monitor so we observe a consistent value.
    synchronized (queuedRunnables) {
        queued = queuedRunnables.size();
    }
    return queued;
}
}
/** Delegates to the backing queue's element count. */
@Override
public int size() {
    final int count = queue.size();
    return count;
}
/** Exposes the current number of pooled objects; intended for tests only. */
@VisibleForTesting
public int getPoolSize() {
    final int pooled = objects.size();
    return pooled;
}
/**
 * Gets the number of elements currently in the queue.
 *
 * @return The number of elements currently in the queue.
 */
public int size() {
    // Copy the lock to a local, matching the style used by the other lock-guarded methods.
    final ReentrantLock sizeLock = lock;
    sizeLock.lock();
    try {
        return elements.size();
    } finally {
        sizeLock.unlock();
    }
}
/**
 * Attempts to insert the given entry without blocking.
 *
 * @param streamElementQueueEntry the entry to insert
 * @param <T> element payload type
 * @return {@code true} if the entry was inserted, {@code false} if the queue was full
 * @throws InterruptedException if interrupted while acquiring the lock
 */
@Override
public <T> boolean tryPut(StreamElementQueueEntry<T> streamElementQueueEntry) throws InterruptedException {
    lock.lockInterruptibly();
    try {
        final boolean hasCapacity = queue.size() < capacity;
        if (hasCapacity) {
            addEntry(streamElementQueueEntry);
            LOG.debug("Put element into ordered stream element queue. New filling degree " +
                "({}/{}).", queue.size(), capacity);
        } else {
            LOG.debug("Failed to put element into ordered stream element queue because it " +
                "was full ({}/{}).", queue.size(), capacity);
        }
        return hasCapacity;
    } finally {
        lock.unlock();
    }
}
/**
 * Inserts the given entry, blocking until capacity is available.
 *
 * @param streamElementQueueEntry the entry to insert
 * @param <T> element payload type
 * @throws InterruptedException if interrupted while waiting for space or the lock
 */
@Override
public <T> void put(StreamElementQueueEntry<T> streamElementQueueEntry) throws InterruptedException {
    lock.lockInterruptibly();
    try {
        // Park on the condition until a slot frees up; re-check after every wakeup.
        while (capacity <= queue.size()) {
            notFull.await();
        }
        addEntry(streamElementQueueEntry);
    } finally {
        lock.unlock();
    }
}
/**
 * Takes a point-in-time snapshot of all queued entries.
 *
 * @return a list view over a copy of the queue's contents
 * @throws InterruptedException if interrupted while acquiring the lock
 */
@Override
public Collection<StreamElementQueueEntry<?>> values() throws InterruptedException {
    lock.lockInterruptibly();
    try {
        final StreamElementQueueEntry<?>[] snapshot =
            queue.toArray(new StreamElementQueueEntry[queue.size()]);
        return Arrays.asList(snapshot);
    } finally {
        lock.unlock();
    }
}
/**
 * Removes exactly {@code elementNum} objects, blocking until that many are available.
 *
 * @param elementNum number of objects to take
 * @return the removed objects
 * @throws InterruptedException if interrupted while waiting or acquiring the lock
 */
private List<T> takeObjects(int elementNum) throws InterruptedException {
    final ReentrantLock takeLock = this.lock;
    takeLock.lockInterruptibly();
    try {
        // Wait until enough objects have accumulated; re-check after every wakeup.
        while (objects.size() < elementNum) {
            notEnough.await();
        }
        final List<T> taken = new ArrayList<>(elementNum);
        for (int remaining = elementNum; remaining > 0; remaining--) {
            taken.add(objects.pop());
        }
        return taken;
    } finally {
        takeLock.unlock();
    }
}
/**
 * Removes exactly {@code elementNum} objects if that many are available, otherwise
 * removes nothing.
 *
 * @param elementNum number of objects to poll
 * @return the removed objects, or an empty list when fewer than {@code elementNum} exist
 * @throws InterruptedException if interrupted while acquiring the lock
 */
private List<T> pollObjects(int elementNum) throws InterruptedException {
    final ReentrantLock pollLock = this.lock;
    pollLock.lockInterruptibly();
    try {
        // All-or-nothing: bail out without touching the pool if we can't satisfy the request.
        if (objects.size() < elementNum) {
            return Collections.emptyList();
        }
        final List<T> polled = new ArrayList<>(elementNum);
        for (int remaining = elementNum; remaining > 0; remaining--) {
            polled.add(objects.pop());
        }
        return polled;
    } finally {
        pollLock.unlock();
    }
}
/**
 * Returns an object to the pool, signalling any waiter; refuses to grow past the
 * configured maximum size.
 *
 * @param theObject the object to return to the pool
 */
private void offer(T theObject) {
    final ReentrantLock offerLock = this.lock;
    offerLock.lock();
    try {
        // Guard clause: reject the offer outright when the pool is already full.
        if (objects.size() >= maxSize) {
            throw new ISE("Cannot exceed pre-configured maximum size");
        }
        objects.push(theObject);
        notEnough.signal();
    } finally {
        offerLock.unlock();
    }
}
}
/**
 * Removes and returns the head element, blocking until the head is completed.
 *
 * @return the completed head element
 * @throws InterruptedException if interrupted while waiting or acquiring the lock
 */
@Override
public AsyncResult poll() throws InterruptedException {
    lock.lockInterruptibly();
    try {
        // Wait until a completed element sits at the head of the queue.
        while (queue.isEmpty() || !queue.peek().isDone()) {
            headIsCompleted.await();
        }
        notFull.signalAll();
        final AsyncResult head = queue.poll();
        // queue.size() here equals the pre-poll size minus one, which is what we report.
        LOG.debug("Polled head element from ordered stream element queue. New filling degree " +
            "({}/{}).", queue.size(), capacity);
        return head;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the head element without removing it, blocking until the head is completed.
 *
 * @return the completed head element (left in the queue)
 * @throws InterruptedException if interrupted while waiting or acquiring the lock
 */
@Override
public AsyncResult peekBlockingly() throws InterruptedException {
    lock.lockInterruptibly();
    try {
        // Wait until a completed element sits at the head of the queue.
        while (queue.isEmpty() || !queue.peek().isDone()) {
            headIsCompleted.await();
        }
        final AsyncResult head = queue.peek();
        LOG.debug("Peeked head element from ordered stream element queue with filling degree " +
            "({}/{}).", queue.size(), capacity);
        return head;
    } finally {
        lock.unlock();
    }
}
/**
 * Enforces the configured cap on {@code failedBuffers}: when the cap is reached,
 * drops the oldest failed buffer, updates the counters, and logs the drop.
 */
private void limitFailedBuffersSize() {
    // Guard clause: nothing to do while the queue is below its configured limit.
    if (failedBuffers.size() < config.getBatchQueueSizeLimit()) {
        return;
    }
    failedBuffers.removeFirst();
    approximateFailedBuffersCount.decrementAndGet();
    droppedBuffers.incrementAndGet();
    log.error(
        "failedBuffers queue size reached the limit [%d], dropping the oldest failed buffer",
        config.getBatchQueueSizeLimit()
    );
}
/**
 * Handles the stream closing: if this push was still pending it is failed, otherwise
 * the freed slot is used to start as many queued pushes as the concurrency limit allows.
 */
@Override
void handleClose() {
    final boolean wasPending = pendingPushes.remove(this);
    if (wasPending) {
        completionHandler.fail("Push reset by client");
        return;
    }
    concurrentStreams--;
    // Drain queued pushes while there is headroom under the concurrency limit.
    while (!pendingPushes.isEmpty()
        && (maxConcurrentStreams == null || concurrentStreams < maxConcurrentStreams)) {
        final Push push = pendingPushes.pop();
        concurrentStreams++;
        context.runOnContext(v -> push.complete());
    }
    response.handleClose();
}
@Override public void finishingComposite(CompositionDefinition compositionDefinition) { // No need to pop anything here; it will be popped by // #finishingAttribute, #finishingCollectionElements, #finishingCollectionIndex, or #finishingEntityIdentifier log.tracef( "%s Finishing composite : %s", StringHelper.repeat( "<<", fetchSourceStack.size() ), compositionDefinition.getName() ); }
@Override public void associationKeyRegistered(AssociationKey associationKey) { // todo : use this information to maintain a map of AssociationKey->FetchSource mappings (associationKey + current FetchSource stack entry) // that mapping can then be used in #foundCircularAssociationKey to build the proper BiDirectionalEntityFetch // based on the mapped owner log.tracef( "%s Registering AssociationKey : %s -> %s", StringHelper.repeat( "..", fetchSourceStack.size() ), associationKey, currentSource() ); fetchedAssociationKeySourceMap.put( associationKey, currentSource() ); }