/**
 * Decrements the usage counter tracked for {@code id}.
 *
 * @param id the identifier whose usage counter should be decremented
 * @return the counter value after decrementing, or 0 when no counter is
 *         currently tracked for {@code id}
 */
@Override
public int decrementUsage(I id) {
    AtomicInteger tracked = this.usage.get(id);
    if (tracked == null) {
        return 0;
    }
    return tracked.decrementAndGet();
}
// Two-party rendezvous: presumably both the upstream signal path and this
// runnable increment `wip`; whichever bumps it to 2 knows both sides are ready
// and performs the emit. A decrement back to 0 afterwards means the peer has
// also unwound, so completion can be forwarded. NOTE(review): the exact
// protocol depends on the enclosing operator, not visible here — confirm.
@Override
public void run() {
    if (wip.incrementAndGet() == 2) {
        emit();
        if (wip.decrementAndGet() == 0) {
            downstream.onComplete();
        }
    }
}
// Trailing brace closes the enclosing (anonymous) class of this fragment.
}
// Announces arrival by decrementing the shared countdown `c`, then busy-spins
// until every participant has arrived (counter reaches 0).
// NOTE(review): the empty spin loop burns a CPU core while waiting —
// presumably intentional in a test/benchmark harness; a CountDownLatch would
// be the conventional alternative in production code.
@Override
public void run() {
    c.decrementAndGet();
    while (c.get() != 0) {
    }
}
// Trailing tokens close the enclosing anonymous class and call, not visible here.
});
/**
 * Records completion of one query with version {@code ver} on node
 * {@code nodeId}: drops the version entry once its last query finishes,
 * prunes the node's map when empty, and refreshes the tracked minimal
 * active version if it was the one that completed.
 */
private synchronized void onQueryDone(UUID nodeId, Long ver) {
    TreeMap<Long, AtomicInteger> perNode = activeQueries.get(nodeId);
    if (perNode == null) {
        return;
    }
    assert minQry != null;
    AtomicInteger active = perNode.get(ver);
    assert active != null && active.get() > 0 : "onQueryDone ver=" + ver;
    if (active.decrementAndGet() != 0) {
        return; // other queries of this version are still running
    }
    perNode.remove(ver);
    if (perNode.isEmpty()) {
        activeQueries.remove(nodeId);
    }
    if (ver.equals(minQry)) {
        minQry = activeMinimal();
    }
}
public void finish(Map<byte[], List<Cell>> familyMaps) { if (!isEnable()) { return; } for (Map.Entry<byte[], List<Cell>> e : familyMaps.entrySet()) { Store store = this.region.getStore(e.getKey()); if (store == null || e.getValue() == null) { continue; } if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) { AtomicInteger counter = preparePutToStoreMap.get(e.getKey()); // preparePutToStoreMap will be cleared when changing the configuration, so it may turn // into a negative value. It will be not accuracy in a short time, it's a trade-off for // performance. if (counter != null && counter.decrementAndGet() < 0) { counter.incrementAndGet(); } } } }
/**
 * Cleans up after a failed subscription to one topic of a multi-topics
 * consumer: asynchronously closes every partition consumer belonging to
 * {@code topicName}, rolls back the shared bookkeeping, and fails
 * {@code subscribeFuture} once the last close has completed.
 */
private void handleSubscribeOneTopicError(String topicName, Throwable error, CompletableFuture<Void> subscribeFuture) {
    log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer {}", topic, topicName, error.getMessage());
    client.externalExecutorProvider().getExecutor().submit(() -> {
        // Counted while filtering so the close callbacks can tell when the
        // final one has finished and run the topic-level rollback exactly once.
        AtomicInteger toCloseNum = new AtomicInteger(0);
        consumers.values().stream().filter(consumer1 -> {
            String consumerTopicName = consumer1.getTopic();
            if (TopicName.get(consumerTopicName).getPartitionedTopicName().equals(topicName)) {
                toCloseNum.incrementAndGet();
                return true;
            } else {
                return false;
            }
        }).collect(Collectors.toList()).forEach(consumer2 -> {
            // Close each matching partition consumer; cleanup runs in the
            // completion callback regardless of whether the close succeeded.
            consumer2.closeAsync().whenComplete((r, ex) -> {
                consumer2.subscribeFuture().completeExceptionally(error);
                allTopicPartitionsNumber.decrementAndGet();
                consumers.remove(consumer2.getTopic());
                // Last close to complete performs the final unwinding.
                if (toCloseNum.decrementAndGet() == 0) {
                    log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer, subscribe error: {}", topic, topicName, error.getMessage());
                    topics.remove(topicName);
                    checkState(allTopicPartitionsNumber.get() == consumers.values().size());
                    subscribeFuture.completeExceptionally(error);
                }
                return;
            });
        });
    });
}
/**
 * Releases one reference to the buffer, freeing it when the count reaches 0.
 *
 * <p>Fix: the original re-read the counter ({@code referenceCount.get() == 0})
 * after the decrement, so a concurrent retain/release between the two reads
 * could make this thread miss (or spuriously perform) the buffer release.
 * Using the value returned by {@code decrementAndGet()} makes the
 * decide-and-free decision depend on a single atomic operation.
 *
 * @throws IllegalStateException if the reference count would drop below 0
 */
@Override
public void release() {
    int remaining = referenceCount.decrementAndGet();
    if (remaining < 0) {
        // Undo the underflow so subsequent calls still see a sane counter.
        referenceCount.incrementAndGet();
        throw new IllegalStateException("Attempted to decrement the reference count below 0");
    }
    if (remaining == 0) {
        buf = null;
    }
}
/**
 * Walks up the AST from {@code expr} (at most 10 ancestor hops) and reports
 * whether it is nested under a JOIN or UNION table source.
 *
 * @param expr the expression whose ancestry is inspected
 * @return {@code true} if an enclosing SELECT's FROM clause is a join/union
 *         table source, or an ancestor node itself is one
 */
public static boolean isFromJoinOrUnionTable(SQLExpr expr) {
    SQLObject temp = expr;
    // Fix: plain int instead of AtomicInteger — this is a single-threaded
    // local hop limit, so the atomic added overhead without any safety.
    int remainingHops = 10;
    // NOTE(review): the instanceof guards below test `expr`, which never
    // changes inside the loop — they look like they were meant to test `temp`.
    // Preserved as-is because the in-loop checks on `temp` already return
    // early for those types; confirm intent before changing.
    while (temp != null && !(expr instanceof SQLSelectQueryBlock) && !(expr instanceof SQLJoinTableSource) && !(expr instanceof SQLUnionQuery) && remainingHops > 0) {
        remainingHops--;
        temp = temp.getParent();
        if (temp instanceof SQLSelectQueryBlock) {
            SQLTableSource from = ((SQLSelectQueryBlock) temp).getFrom();
            if (from instanceof SQLJoinTableSource || from instanceof SQLUnionQuery) {
                return true;
            }
        }
        if (temp instanceof SQLJoinTableSource || temp instanceof SQLUnionQuery) {
            return true;
        }
    }
    return false;
}
/**
 * Attempts to reserve one request slot for {@code nodeId}, honoring the
 * per-node concurrency cap.
 *
 * @return {@code true} if a slot was reserved; {@code false} if the node is
 *         disabled for this run or already at {@code maxConcurrentRequestsPerNode}
 */
private boolean canRunForNode(LlapNodeId nodeId, Set<LlapNodeId> currentRunDisabledNodes) {
    if (currentRunDisabledNodes.contains(nodeId)) {
        return false;
    }
    AtomicInteger inFlight = runningRequests.get(nodeId);
    if (inFlight == null) {
        // First request for this node: publish a fresh counter, but defer to
        // whichever counter a racing thread may have installed first.
        AtomicInteger fresh = new AtomicInteger(0);
        AtomicInteger raced = runningRequests.putIfAbsent(nodeId, fresh);
        inFlight = (raced == null) ? fresh : raced;
    }
    if (inFlight.incrementAndGet() > maxConcurrentRequestsPerNode) {
        // Over the cap: give the slot back.
        inFlight.decrementAndGet();
        return false;
    }
    return true;
}
// Listener fired once per master-node sync attempt. Collects each master's
// per-node futures and, when the last master has reported in (`masters` hits
// 0), chains a second countdown over all collected node futures so the
// aggregate `result` succeeds only after every node future completes.
@Override
public void operationComplete(Future<Collection<RFuture<Void>>> future) throws Exception {
    // NOTE(review): a failed master future is silently skipped — its node
    // futures are simply never awaited; presumably failures are surfaced
    // elsewhere. Confirm against the enclosing operation.
    if (future.isSuccess()) {
        futures.addAll(future.getNow());
    }
    if (masters.decrementAndGet() == 0) {
        final AtomicInteger nodes = new AtomicInteger(futures.size());
        for (RFuture<Void> nodeFuture : futures) {
            nodeFuture.addListener(new FutureListener<Void>() {
                @Override
                public void operationComplete(Future<Void> future) throws Exception {
                    // Last node future to finish resolves the aggregate result.
                    if (nodes.decrementAndGet() == 0) {
                        result.trySuccess(null);
                    }
                }
            });
        }
    }
}
// Trailing tokens close the enclosing anonymous listener and call, not visible here.
} });
final AtomicInteger wip = new AtomicInteger(1); if (wip.decrementAndGet() == 0) { Throwable ex = error.terminate(); if (ex == null) {
// Decrements the cached region count for {@code tableName} and returns the
// updated value.
// NOTE(review): the {@code count} parameter is accepted but never used — the
// counter is always decremented by exactly 1. Confirm whether callers expect
// a decrement of {@code count} (i.e. {@code addAndGet(-count)}).
// NOTE(review): throws NullPointerException when the table is not present in
// tableAndRegionInfo — confirm callers guarantee the entry exists.
synchronized int decrementRegionCountForTable(TableName tableName, int count) {
    return tableAndRegionInfo.get(tableName).decrementAndGet();
}
/**
 * Drives {@code iterations} simulated requests through {@code executor},
 * bounding concurrent "work" with a fair semaphore of {@code limit} permits,
 * and prints a once-per-second progress line (tick count, requests submitted
 * in the last second, currently busy tasks).
 *
 * <p>Fix: the original called {@code sem.release()} unconditionally in the
 * {@code finally} block, so an interrupt during {@code acquire()} released a
 * permit that was never acquired, silently raising the effective limit.
 *
 * @param iterations total number of tasks to submit
 * @param limit      maximum number of tasks allowed to run concurrently
 * @param executor   executor the simulated work is submitted to
 * @param latency    supplies the simulated per-request latency in milliseconds
 */
public void run(int iterations, int limit, Executor executor, Supplier<Long> latency) {
    AtomicInteger requests = new AtomicInteger();
    AtomicInteger busy = new AtomicInteger();
    AtomicInteger counter = new AtomicInteger();
    // NOTE(review): this reporter executor is never shut down; acceptable for a
    // run-to-exit benchmark, but a thread leak if this method is called repeatedly.
    Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
        System.out.println("" + counter.incrementAndGet() + " total=" + requests.getAndSet(0) + " busy=" + busy.get());
    }, 1, 1, TimeUnit.SECONDS);
    Semaphore sem = new Semaphore(limit, true);
    for (int i = 0; i < iterations; i++) {
        requests.incrementAndGet();
        busy.incrementAndGet();
        executor.execute(() -> {
            boolean acquired = false;
            try {
                sem.acquire();
                acquired = true;
                TimeUnit.MILLISECONDS.sleep(latency.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                // Only return a permit we actually hold (see class comment: the
                // original released even when acquire() was interrupted).
                if (acquired) {
                    sem.release();
                }
                busy.decrementAndGet();
            }
        });
    }
}
/**
 * Records that completion was observed while sampling the concurrency level:
 * bumps the running-thread gauge, updates the observed high-water mark of
 * concurrently running threads, then restores the gauge.
 */
@Override
public void onComplete() {
    threadsRunning.incrementAndGet();
    System.out.println(">>> Busyobserver received onComplete");
    onComplete = true;
    int running = threadsRunning.get();
    int recordedMax = maxConcurrentThreads.get();
    if (running > recordedMax) {
        // CAS so a larger sample taken by a concurrent observer is never clobbered.
        maxConcurrentThreads.compareAndSet(recordedMax, running);
    }
    threadsRunning.decrementAndGet();
}
// Verifies that disposing a directly-scheduled task interrupts it while it is
// blocked in Thread.sleep().
@Test(timeout = 10000)
public void interruptibleDirectTask() throws Exception {
    Scheduler scheduler = getScheduler();
    // Two-party rendezvous: both the test thread and the scheduled task
    // decrement `sync`, and whichever arrives first busy-spins until the other
    // has arrived — ensuring the task reaches its sleep before disposal.
    final AtomicInteger sync = new AtomicInteger(2);
    final AtomicBoolean isInterrupted = new AtomicBoolean();
    Disposable d = scheduler.scheduleDirect(new Runnable() {
        @Override
        public void run() {
            if (sync.decrementAndGet() != 0) {
                while (sync.get() != 0) { }
            }
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ex) {
                // Disposal is expected to interrupt the sleeping task.
                isInterrupted.set(true);
            }
        }
    });
    if (sync.decrementAndGet() != 0) {
        while (sync.get() != 0) { }
    }
    // Give the task time to enter its sleep before disposing.
    Thread.sleep(500);
    d.dispose();
    // Poll up to ~1s for the task to observe the interrupt.
    int i = 20;
    while (i-- > 0 && !isInterrupted.get()) {
        Thread.sleep(50);
    }
    assertTrue("Interruption did not propagate", isInterrupted.get());
}
// Two-party rendezvous: presumably both the upstream signal path and this
// runnable increment `wip`; whichever bumps it to 2 knows both sides are ready
// and performs the emit. A decrement back to 0 afterwards means the peer has
// also unwound, so completion can be forwarded. NOTE(review): the exact
// protocol depends on the enclosing operator, not visible here — confirm.
@Override
public void run() {
    if (wip.incrementAndGet() == 2) {
        emit();
        if (wip.decrementAndGet() == 0) {
            downstream.onComplete();
        }
    }
}
// Trailing brace closes the enclosing (anonymous) class of this fragment.
}
AtomicInteger ai = refCountReenterCount.get(); if (owner != null) { ai.incrementAndGet(); } else { if (ai.decrementAndGet() <= 0) { refCountOwner.set(null); ai.set(0); AtomicInteger ai = refCountReenterCount.get(); if (ai == null) { ai = new AtomicInteger(0); refCountReenterCount.set(ai);
// Announces arrival by decrementing the shared countdown `c`, then busy-spins
// until every participant has arrived (counter reaches 0).
// NOTE(review): the empty spin loop burns a CPU core while waiting —
// presumably intentional in a test/benchmark harness; a CountDownLatch would
// be the conventional alternative in production code.
@Override
public void run() {
    c.decrementAndGet();
    while (c.get() != 0) {
    }
}
// Trailing tokens close the enclosing anonymous class and call, not visible here.
});
// Listener fired once per master-node sync attempt. Collects each master's
// per-node futures and, when the last master has reported in (`masters` hits
// 0), chains a second countdown over all collected node futures so the
// aggregate `result` succeeds only after every node future completes.
@Override
public void operationComplete(Future<Collection<RFuture<Void>>> future) throws Exception {
    // NOTE(review): a failed master future is silently skipped — its node
    // futures are simply never awaited; presumably failures are surfaced
    // elsewhere. Confirm against the enclosing operation.
    if (future.isSuccess()) {
        futures.addAll(future.getNow());
    }
    if (masters.decrementAndGet() == 0) {
        final AtomicInteger nodes = new AtomicInteger(futures.size());
        for (RFuture<Void> nodeFuture : futures) {
            nodeFuture.addListener(new FutureListener<Void>() {
                @Override
                public void operationComplete(Future<Void> future) throws Exception {
                    // Last node future to finish resolves the aggregate result.
                    if (nodes.decrementAndGet() == 0) {
                        result.trySuccess(null);
                    }
                }
            });
        }
    }
}
// Trailing tokens close the enclosing anonymous listener and call, not visible here.
} });
/**
 * Verifies HTTP pipelining on a single connection: the server must never see
 * a second request while one is still pending, and responses must arrive back
 * in request order.
 */
@Test
public void testPipelinedWithPendingResponse() throws Exception {
    int numReq = 10;
    waitFor(numReq);
    AtomicInteger inflight = new AtomicInteger();
    AtomicInteger count = new AtomicInteger();
    server.requestHandler(req -> {
        int val = count.getAndIncrement();
        // Pipelined requests must be delivered one at a time: no other
        // request may be in flight when a new one arrives.
        assertEquals(0, inflight.getAndIncrement());
        // Delay the response so the remaining pipelined requests queue up.
        vertx.setTimer(100, v -> {
            inflight.decrementAndGet();
            req.response().end("" + val);
        });
    });
    startServer();
    client.close();
    // Pipelining + pool size 1 forces every request onto a single connection.
    client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
    for (int i = 0;i < numReq;i++) {
        String expected = "" + i;
        client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", onSuccess(resp -> {
            resp.bodyHandler(body -> {
                // Responses must come back in the order requests were sent.
                assertEquals(expected, body.toString());
                complete();
            });
        })).end(TestUtils.randomAlphaString(1024));
    }
    await();
}