/**
 * Enqueues {@code batch} for emission, first trimming the emit queue to its
 * configured bound, then updating the approximate size counters.
 */
private void addBatchToEmitQueue(Batch batch) {
    // Make room before enqueueing so the queue never exceeds its bound.
    limitBuffersToEmitSize();
    buffersToEmit.addLast(batch);
    // Counters are explicitly "approximate": they are maintained separately
    // from the queue and not updated atomically with it.
    approximateBuffersToEmitCount.incrementAndGet();
    final int batchEvents = batch.eventCount.get();
    approximateEventsToEmitCount.addAndGet(batchEvents);
}
/**
 * Routes {@code value} by priority: items above the default priority are
 * appended to the back of the high-priority deque; all others are pushed onto
 * the front of the low-priority deque.
 */
public void add(T value) {
    final boolean isHighPriority = value.getPriority() > Priority.DEFAULT_PRIORITY;
    if (!isHighPriority) {
        // NOTE(review): low-priority items are prepended (LIFO order within
        // the low-priority deque) — confirm this asymmetry is intentional.
        _lowPriority.addFirst(value);
        return;
    }
    _highPriority.addLast(value);
}
public void checkContext(String s, boolean front) { if (!front && pendingContext.size() > MAX_PENDING) return; // queue is full if (!harvestedContext.contains(s) && !pendingContext.contains(s)) { if (front) pendingContext.addFirst(s); else pendingContext.addLast(s); } while (pendingContext.size() > MAX_PENDING) pendingContext.removeLast(); if (harvestedContext.size() > MAX_HARVESTED) harvestedContext.clear(); }
/**
 * Sends {@code resp} on {@code conn}, attempting a direct socket write from
 * this thread when no other write is in flight; otherwise the response is
 * queued and the connection is registered with the write selector.
 *
 * Concurrency: the lock is only tried (never awaited) so handler threads are
 * never blocked here; the queue-empty check is repeated under the lock to
 * close the race with a concurrent writer.
 */
void doRespond(SimpleServerRpcConnection conn, RpcResponse resp) throws IOException {
    boolean added = false;
    // If there is already a write in progress, we don't wait. This allows to free the handlers
    // immediately for other tasks.
    if (conn.responseQueue.isEmpty() && conn.responseWriteLock.tryLock()) {
        try {
            // Re-check under the lock: another thread may have enqueued between
            // the unlocked check above and acquiring the lock.
            if (conn.responseQueue.isEmpty()) {
                // If we're alone, we can try to do a direct call to the socket. It's
                // an optimization to save on context switches and data transfer between cores..
                if (processResponse(conn, resp)) {
                    return; // we're done.
                }
                // Too big to fit, putting ahead.
                conn.responseQueue.addFirst(resp);
                added = true; // We will register to the selector later, outside of the lock.
            }
        } finally {
            conn.responseWriteLock.unlock();
        }
    }
    if (!added) {
        conn.responseQueue.addLast(resp);
    }
    registerForWrite(conn);
}
}
if (lastFlush != null) { Callback c = new Callback(future, lengthAfterFlush, Collections.emptyList()); waitingAckQueue.addLast(c);
headerBuf.writerIndex(headerLen); Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList); waitingAckQueue.addLast(c);
/**
 * Records a newly-active channel: logs it to stdout and appends it to the
 * shared {@code channels} deque.
 * NOTE(review): raw System.out trace — presumably debug output; consider a
 * proper logger if this handler ships to production.
 */
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    System.out.println("channelActive: " + ctx.channel());
    channels.addLast(ctx.channel());
}
/**
 * Derives a new immutable {@code WorkState} from {@code workState} with the
 * accepted work appended to the pending queue and its id recorded in the
 * accepted set. Every collection is defensively copied from the source state.
 */
private WorkState(WorkState workState, WorkAccepted workAccepted) {
    ConcurrentLinkedDeque<Work> nextPending =
        new ConcurrentLinkedDeque<>(workState.pendingWork);
    nextPending.addLast(workAccepted.work);

    Set<String> nextAccepted = new HashSet<>(workState.acceptedWorkIds);
    nextAccepted.add(workAccepted.work.workId);

    pendingWork = nextPending;
    acceptedWorkIds = nextAccepted;
    workInProgress = new HashMap<>(workState.workInProgress);
    doneWorkIds = new HashSet<>(workState.doneWorkIds);
}
/**
 * Derives a new {@code WorkState} after a worker failure: the failed worker's
 * in-progress work is removed from {@code workInProgress} and re-queued onto
 * {@code pendingWork} for another worker.
 *
 * Fix: the original called {@code addLast(workState.workInProgress.get(...))}
 * unconditionally — if the work id is absent (e.g. already completed or
 * re-assigned), {@code get} returns null and ConcurrentLinkedDeque.addLast(null)
 * throws NullPointerException. The removal now also doubles as the lookup.
 */
public WorkState(WorkState workState, WorkerFailed workerFailed) {
    Map<String, Work> tmp_workInProgress = new HashMap<>(workState.workInProgress);
    ConcurrentLinkedDeque<Work> tmp_pendingWork =
        new ConcurrentLinkedDeque<>(workState.pendingWork);
    // remove() returns the previous mapping (or null) — no second lookup needed.
    Work failedWork = tmp_workInProgress.remove(workerFailed.workId);
    if (failedWork != null) {
        tmp_pendingWork.addLast(failedWork); // re-queue for another worker
    }
    workInProgress = tmp_workInProgress;
    acceptedWorkIds = new HashSet<>(workState.acceptedWorkIds);
    doneWorkIds = new HashSet<>(workState.doneWorkIds);
    pendingWork = tmp_pendingWork;
}
/**
 * Derives a new {@code WorkState} after a worker timeout: the timed-out
 * worker's in-progress work is removed from {@code workInProgress} and
 * re-queued onto {@code pendingWork} for another worker.
 *
 * Fix: the original called {@code addLast(workState.workInProgress.get(...))}
 * unconditionally — if the work id is absent (e.g. already completed),
 * {@code get} returns null and ConcurrentLinkedDeque.addLast(null) throws
 * NullPointerException. The removal now also doubles as the lookup.
 */
public WorkState(WorkState workState, WorkerTimedOut workerTimedOut) {
    Map<String, Work> tmp_workInProgress = new HashMap<>(workState.workInProgress);
    ConcurrentLinkedDeque<Work> tmp_pendingWork =
        new ConcurrentLinkedDeque<>(workState.pendingWork);
    // remove() returns the previous mapping (or null) — no second lookup needed.
    Work timedOutWork = tmp_workInProgress.remove(workerTimedOut.workId);
    if (timedOutWork != null) {
        tmp_pendingWork.addLast(timedOutWork); // re-queue for another worker
    }
    workInProgress = tmp_workInProgress;
    acceptedWorkIds = new HashSet<>(workState.acceptedWorkIds);
    doneWorkIds = new HashSet<>(workState.doneWorkIds);
    pendingWork = tmp_pendingWork;
}
/**
 * Appends {@code batch} to the emit queue after trimming it to its size
 * limit, then bumps the approximate buffer/event counters.
 */
private void addBatchToEmitQueue(Batch batch) {
    // Enforce the bound before adding, not after.
    limitBuffersToEmitSize();
    buffersToEmit.addLast(batch);
    // These counters are best-effort and not updated atomically with the queue.
    approximateBuffersToEmitCount.incrementAndGet();
    approximateEventsToEmitCount.addAndGet(batch.eventCount.get());
}
/**
 * Appends {@code sourceRecord} after first waiting for queue capacity.
 * NOTE(review): waitForCapacity() is assumed to block until space is
 * available (not drop or throw) — confirm against its definition.
 */
@Override
public void addLast(SourceRecord sourceRecord) {
    waitForCapacity();
    super.addLast(sourceRecord);
}
/**
 * Queues a batch for emission: trims the queue to its configured limit,
 * appends the batch, and updates the approximate counters.
 */
private void addBatchToEmitQueue(Batch batch) {
    limitBuffersToEmitSize(); // keep the queue within its bound before adding
    buffersToEmit.addLast(batch);
    approximateBuffersToEmitCount.incrementAndGet();
    // Snapshot the batch's event count once and fold it into the running total.
    final int eventsInBatch = batch.eventCount.get();
    approximateEventsToEmitCount.addAndGet(eventsInBatch);
}
/**
 * Builds a {@link Notification} from the id and arguments, enqueues it, and
 * wakes all threads waiting on the queue monitor.
 *
 * Fix: the original performed {@code addLast} outside the synchronized block
 * and only took the monitor for {@code notifyAll}. Unless the queue itself is
 * a concurrent collection (not visible from here), that leaves the enqueue
 * without a happens-before edge to the wakeup. Enqueueing and notifying under
 * the same monitor makes the pair atomic with respect to waiters.
 */
public void postNotification(int notificationId, Object... args) {
    final Notification notification = new Notification(notificationId, args);
    synchronized (notificationsQueue) {
        notificationsQueue.addLast(notification);
        notificationsQueue.notifyAll();
    }
}
/**
 * Adds {@code batch} to the tail of the emit queue, evicting first if the
 * queue exceeds its limit, and maintains the approximate counters.
 */
private void addBatchToEmitQueue(Batch batch) {
    // Pre-trim so this insert never leaves the queue over its limit.
    limitBuffersToEmitSize();
    buffersToEmit.addLast(batch);
    // "Approximate" by design — counters trail the queue under contention.
    approximateBuffersToEmitCount.incrementAndGet();
    approximateEventsToEmitCount.addAndGet(batch.eventCount.get());
}
/**
 * Emit-queue insertion: bounds the queue, appends the batch, and records the
 * batch's buffer/event contribution in the approximate counters.
 */
private void addBatchToEmitQueue(Batch batch) {
    limitBuffersToEmitSize();
    buffersToEmit.addLast(batch);
    // One more buffer queued...
    approximateBuffersToEmitCount.incrementAndGet();
    // ...carrying this many events (as counted by the batch itself).
    final int batchEventCount = batch.eventCount.get();
    approximateEventsToEmitCount.addAndGet(batchEventCount);
}
/**
 * Appends {@code line} to the rolling buffer, evicting the oldest entry first
 * whenever the buffer is already at its capacity ({@code count}).
 */
@Override
public void accept(@Nonnull String line) {
    final boolean atCapacity = this.lines.size() >= this.count;
    if (atCapacity) {
        this.lines.removeFirst(); // drop the oldest so the newest always fits
    }
    this.lines.addLast(line);
}
/**
 * Dispatches {@code value} by priority: above-default goes to the tail of
 * the high-priority deque, everything else to the head of the low-priority
 * deque.
 */
public void add(T value) {
    if (value.getPriority() <= Priority.DEFAULT_PRIORITY) {
        // NOTE(review): low-priority values are prepended (newest-first) —
        // confirm this ordering is intentional.
        _lowPriority.addFirst(value);
    } else {
        _highPriority.addLast(value);
    }
}
/**
 * {@inheritDoc}
 *
 * Wraps the listener, registers the wrapper in both the map and the queue,
 * and bumps the pool size (plus statistics when enabled).
 *
 * Fix: the original did {@code cls.put(cl, wrapper)} followed by a redundant
 * {@code cls.get(cl)} — a second lookup that, under concurrent mutation of
 * {@code cls}, could enqueue a different wrapper (or null) than the one just
 * stored. The wrapper reference is now used directly.
 */
public void addConnectionListener(ConnectionListener cl) {
    ConnectionListenerWrapper wrapper = new ConnectionListenerWrapper(cl, false, false);
    cls.put(cl, wrapper);
    clq.addLast(wrapper);
    poolSize.incrementAndGet();
    if (pool.getInternalStatistics().isEnabled()) {
        pool.getInternalStatistics().deltaCreatedCount();
    }
}
/**
 * Verifies that performOnAll applies the side-effecting operation to every
 * element of the underlying stream. Element order is not asserted —
 * containsOnly checks membership only.
 */
@Test
void thenPerformOnAllShouldGenerateASynchronousSideEffectForAllElementsOfTheUnderlyingStream() {
    // Thread-safe sink, since performOnAll may run the operations concurrently.
    ConcurrentLinkedDeque<Integer> sideEffects = new ConcurrentLinkedDeque<>();
    FluentFutureStream.of(
        CompletableFuture.completedFuture(
            Stream.of(1, 2, 3)))
        // Record each element; return a completed future so the pipeline proceeds.
        .performOnAll(i -> {
            sideEffects.addLast(i);
            return CompletableFuture.completedFuture(null);
        })
        .join()
        .collect(Guavate.toImmutableList());
    assertThat(sideEffects).containsOnly(1, 2, 3);
}