Refine search
/**
 * Discards all state accumulated by the previous test run so the next
 * test starts from a clean slate.
 */
private void resetAll() {
    m_numFailed = 0;
    m_configIssues = new ConcurrentLinkedDeque<>();
    m_allTests = new ConcurrentLinkedDeque<>();
}
// NOTE(review): fragment — this is the tail of an anonymous callback class;
// the opening "new Callback...() {" is outside this view.
@Override
public void onFailure(Throwable throwable) {
    // Tally the failure and retain the cause for later assertion/reporting.
    callbackFailures.incrementAndGet();
    callbackExceptions.add(throwable);
}
});
// NOTE(review): fragment — the enclosing method signature, the worker-submission
// code between pool creation and shutdownNow(), and the closing braces are
// outside this view.
String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);
ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
int r = 0;
// Retry loop: each round retries the requests that did not complete last round.
while (r <= GET_PAGES_RETRIES) {
    ++r;
    log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
    // Requests that fail this round are queued here for the next round.
    ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
    ExecutorService es = Executors.newFixedThreadPool(10,
        ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
    // NOTE(review): shutdownNow() immediately after creating the pool strongly
    // suggests the submit/await code between them was elided from this view.
    es.shutdownNow();
    log.warn("Timed out while getting all pages for country-{} at round {}. Next round now has size {}.",
        country, r, nextRound.size());
    // No failures left to retry — all pages fetched.
    if (nextRound.isEmpty()) {
        break;
/**
 * Retrieves and removes the head of this deque.
 *
 * @throws NoSuchElementException {@inheritDoc}
 */
public E removeFirst() {
    E head = pollFirst();
    return screenNullResult(head);
}
public void checkContext(String s, boolean front) { if (!front && pendingContext.size() > MAX_PENDING) return; // queue is full if (!harvestedContext.contains(s) && !pendingContext.contains(s)) { if (front) pendingContext.addFirst(s); else pendingContext.addLast(s); } while (pendingContext.size() > MAX_PENDING) pendingContext.removeLast(); if (harvestedContext.size() > MAX_HARVESTED) harvestedContext.clear(); }
/**
 * Returns {@code channel} to the idle pool for {@code partitionKey}, stamping
 * it with {@code now} as its idle-since time.
 *
 * @param channel      the channel being returned to the pool
 * @param partitionKey key identifying the pool partition
 * @param now          timestamp used as the channel's idle-since time
 * @return whether the channel was accepted into the partition
 */
private boolean offer0(Channel channel, Object partitionKey, long now) {
    // computeIfAbsent handles the missing-partition case atomically on its own;
    // the previous get()-then-computeIfAbsent sequence was redundant.
    ConcurrentLinkedDeque<IdleChannel> partition =
        partitions.computeIfAbsent(partitionKey, pk -> new ConcurrentLinkedDeque<>());
    return partition.offerFirst(new IdleChannel(channel, now));
}
/**
 * Enforces the batch-queue size limit: when the approximate queue size has
 * reached the configured cap, drops the oldest pending batch and adjusts the
 * bookkeeping counters accordingly.
 */
private void limitBuffersToEmitSize() {
    if (approximateBuffersToEmitCount.get() < config.getBatchQueueSizeLimit()) {
        return; // under the cap — nothing to drop
    }
    Batch droppedBatch = buffersToEmit.pollFirst();
    if (droppedBatch == null) {
        return; // raced with a concurrent consumer; queue already drained
    }
    batchFinalized();
    approximateBuffersToEmitCount.decrementAndGet();
    approximateEventsToEmitCount.addAndGet(-droppedBatch.eventCount.get());
    droppedBuffers.incrementAndGet();
    log.error(
        "buffersToEmit queue size reached the limit [%d], dropping the oldest buffer to emit",
        config.getBatchQueueSizeLimit()
    );
}
/**
 * Removes and returns the oldest batch from the emit queue, updating the
 * approximate buffer/event counters; returns {@code null} when the queue
 * is empty.
 */
private Batch pollBatchFromEmitQueue() {
    Batch head = buffersToEmit.pollFirst();
    if (head != null) {
        approximateBuffersToEmitCount.decrementAndGet();
        approximateEventsToEmitCount.addAndGet(-head.eventCount.get());
    }
    return head;
}
// NOTE(review): fragment — the loop body and the statement that the trailing
// string concatenation belongs to are outside this view.
final AtomicInteger callbackSuccesses = new AtomicInteger(0);
final AtomicInteger callbackFailures = new AtomicInteger(0);
final ConcurrentLinkedDeque<Throwable> callbackExceptions = new ConcurrentLinkedDeque<>();
Verifier verifier = new Verifier();
while (recordIterator.hasNext()) {
// NOTE(review): the concatenation below is missing a separator before
// "Callback Failures" (renders as "...Successes = 3Callback Failures = ...")
// — likely should be " Callback Failures = "; cannot change a runtime string
// in a doc-only pass.
    + " Callback Successes = " + callbackSuccesses.get()
    + "Callback Failures = " + callbackFailures.get());
// NOTE(review): fragment — braces are unbalanced; this appears to be the
// interior of a CAS retry loop (likely do/while) plus part of a separate
// pool-lookup loop, with enclosing structure outside this view.
count = activeInvocationCount.get();
// Pool saturated — do not start another invocation.
if (count == maxConnections) {
    return;
}
// NOTE(review): as written, a failed CAS spins forever on the stale `count`;
// presumably the enclosing (elided) loop re-reads `count` — confirm.
while (!activeInvocationCount.compareAndSet(count, count + 1));
RequestHolder next = pendingConnectionRequests.poll();
if (next == null) {
    // Nothing pending — release the slot we just claimed.
    activeInvocationCount.decrementAndGet();
    return;
// Lazily create the per-key connection deque (putIfAbsent + re-get races
// benignly: losers just read the winner's deque).
ConcurrentLinkedDeque<ClientConnectionHolder> queue = connections.get(key);
if (queue == null) {
    connections.putIfAbsent(key, new ConcurrentLinkedDeque<>());
    queue = connections.get(key);
ClientConnectionHolder existingConnection = queue.poll();
if (existingConnection == null) {
    break;
/**
 * Advances the fan-out: records one completed request and either finishes the
 * overall operation (when every service has responded) or submits the next
 * task to the executor.
 */
private void executeNextTask() {
    int completedSoFar = _requestCompletedCount.incrementAndGet();
    if (completedSoFar == _serviceNames.size()) {
        // All services have completed — signal success and drop bookkeeping.
        _callback.onSuccess(None.none());
        _outstandingRequests.clear();
        return;
    }
    _outstandingRequests.add(_executorService.submit(() -> execute()));
}
// Records an event into a bounded deque, evicting one element once the
// count exceeds the buffer size.
@Override
public void record(final Event event) {
    // push == addFirst: newest events sit at the head of the deque.
    _events.push(event);
    final int events = _eventsCount.incrementAndGet();
    if (events > _bufferSize) {
        // NOTE(review): pollFirst() removes from the HEAD — i.e. the newest
        // element (possibly the one just pushed, or a concurrent newer one).
        // If the intent is to evict the OLDEST, this should be pollLast();
        // confirm against the class's read path.
        // NOTE(review): _eventsCount is never decremented here, so once the
        // threshold is crossed every record() evicts one element and the
        // counter grows without bound — confirm whether that is intended.
        _events.pollFirst();
    }
}
/** * Construct normal file logger. * * @param endpoint Endpoint. * @param igfsName IGFS name. * @param dir Log file path. * @param batchSize Batch size. */ private IgfsLogger(String endpoint, String igfsName, String dir, int batchSize) { A.notNull(endpoint, "endpoint cannot be null"); A.notNull(dir, "dir cannot be null"); A.ensure(batchSize > 0, "batch size cannot be negative"); enabled = true; this.endpoint = endpoint; this.batchSize = batchSize; pid = U.jvmPid(); File dirFile = new File(dir); A.ensure(dirFile.isDirectory(), "dir must point to a directory"); A.ensure(dirFile.exists(), "dir must exist"); file = new File(dirFile, "igfs-log-" + igfsName + "-" + pid + ".csv"); entries = new ConcurrentLinkedDeque<>(); cnt = new AtomicInteger(); useCnt = new AtomicInteger(); rwLock = new ReentrantReadWriteLock(); flushLock = new ReentrantLock(); flushCond = flushLock.newCondition(); flushWorker = new Thread(new FlushWorker()); flushWorker.setDaemon(true); flushWorker.start(); }
/**
 * Returns a finished connection to the pool: decrements the active-invocation
 * count, re-pools the connection if it is still open, and kicks the pending
 * request queue.
 */
public void returnConnection(ClientConnectionHolder connection) {
    activeInvocationCount.decrementAndGet();
    if (connection.getConnection().isOpen()) {
        // Pool is keyed by SSL context; null contexts share a sentinel key.
        Object poolKey = connection.sslContext == null ? NULL_SSL_CONTEXT : connection.sslContext;
        connections.get(poolKey).add(connection);
    }
    runPending();
}
/**
 * Stops this executor: closes the thread-pool lease (if any), drains and stops
 * every reserved thread, then delegates to the superclass shutdown.
 */
@Override
public void doStop() throws Exception {
    if (_lease != null)
        _lease.close();

    // Drain the reserved-thread stack, stopping each thread as it is removed.
    for (ReservedThread reserved = _stack.pollFirst(); reserved != null; reserved = _stack.pollFirst()) {
        _size.decrementAndGet();
        reserved.stop();
    }

    super.doStop();
}
// NOTE(review): fragment — bare statements from a larger method (likely a
// flush/swap path); the enclosing definition is outside this view.
// Swap the live entries deque for a fresh one and reset the counter; the
// captured reference (entries0) is presumably flushed afterwards — confirm.
entries0 = entries;
entries = new ConcurrentLinkedDeque<>();
cnt.set(0);
/**
 * Creates a publisher that merges the supplied upstream publishers into a
 * single stream.
 *
 * @param publishers the upstream sources; at least two are required
 * @throws IllegalArgumentException if fewer than two publishers are supplied
 */
@SafeVarargs
public MergingPublisher(Publisher<? extends T>... publishers) {
    if (publishers.length < 2) {
        throw new IllegalArgumentException("At least 2 publishers must be supplied to merge");
    }
    publisherCount.set(publishers.length);
    //noinspection ManualArrayToCollectionCopy
    for (int i = 0; i < publishers.length; i++) {
        upstreamPublishers.add(publishers[i]);
    }
}
// Verifies that messages sent without a Vert.x context arrive at a clustered
// consumer in order: registers a consumer on node 1, then sends `size`
// integers from node 0 and asserts the received sequence matches.
@Test
public void sendNoContext() throws Exception {
    int size = 1000;
    ConcurrentLinkedDeque<Integer> expected = new ConcurrentLinkedDeque<>();
    ConcurrentLinkedDeque<Integer> obtained = new ConcurrentLinkedDeque<>();
    startNodes(2);
    CountDownLatch latch = new CountDownLatch(1);
    vertices[1].eventBus().<Integer>consumer(ADDRESS1, msg -> {
        obtained.add(msg.body());
        // NOTE(review): this size comparison races with the send loop below —
        // the consumer can observe a partially-filled `expected`; presumably
        // the equality assert only fires once all sends are done — confirm.
        if (obtained.size() == expected.size()) {
            assertEquals(new ArrayList<>(expected), new ArrayList<>(obtained));
            testComplete();
        }
    }).completionHandler(ar -> {
        assertTrue(ar.succeeded());
        latch.countDown();
    });
    // Wait until the consumer registration has propagated across the cluster.
    latch.await();
    EventBus bus = vertices[0].eventBus();
    for (int i = 0;i < size;i++) {
        expected.add(i);
        bus.send(ADDRESS1, i);
    }
    await();
}
/**
 * Completes 1000 sub-futures of a compound future from 20 concurrent threads
 * and verifies the compound future observes completion.
 *
 * @throws Exception If failed.
 */
@Test
public void testConcurrentCompletion() throws Exception {
    GridCompoundFuture<Boolean, Boolean> fut = new GridCompoundFuture<>(CU.boolReducer());

    final ConcurrentLinkedDeque<GridFutureAdapter<Boolean>> futs = new ConcurrentLinkedDeque<>();

    // Register 1000 sub-futures with the compound future and keep them queued
    // for the worker threads to complete.
    for (int i = 0; i < 1000; i++) {
        GridFutureAdapter<Boolean> part = new GridFutureAdapter<>();

        fut.add(part);
        futs.add(part);
    }

    fut.markInitialized();

    // 20 threads race to drain the queue, completing each sub-future.
    IgniteInternalFuture<?> complete = multithreadedAsync(new Runnable() {
        @Override public void run() {
            for (GridFutureAdapter<Boolean> part = futs.poll(); part != null; part = futs.poll())
                part.onDone(true);
        }
    }, 20);

    complete.get();

    assertTrue(fut.isDone());
}
// NOTE(review): fragment — the iteration over ledger entries and the branch
// the dangling "} else if"/"} else" belong to are outside this view.
long toOffloadSize = 0;
// NOTE(review): raw type — should be new ConcurrentLinkedDeque<>() for type
// safety; left as-is in this doc-only pass.
ConcurrentLinkedDeque<LedgerInfo> toOffload = new ConcurrentLinkedDeque();
// Once the running size exceeds the threshold, queue this ledger for offload
// (addFirst: later ledgers in the scan end up at the head).
} else if (sizeSummed > threshold) {
    toOffloadSize += size;
    toOffload.addFirst(e.getValue());
if (toOffload.size() > 0) {
    log.info("[{}] Going to automatically offload ledgers {}"
            + ", total size = {}, already offloaded = {}, to offload = {}",
        name, toOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()),
        sizeSummed, alreadyOffloadedSize, toOffloadSize);
} else {