/**
 * Creates a collector that consumes per-operation results and runs
 * {@code onFinish} once {@code expectedOps} operations have been counted down.
 *
 * @param resultConsumer callback invoked with each individual result
 * @param expectedOps    number of operations that must complete before finishing
 * @param onFinish       executed exactly once, after the final count-down
 * @param context        the search phase context this collector belongs to
 */
CountedCollector(Consumer<R> resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
    this.context = context;
    this.onFinish = onFinish;
    this.counter = new CountDown(expectedOps);
    this.resultConsumer = resultConsumer;
}
@Override
public void onResponse(ShardResponse shardResponse) {
    // Record this shard's result before counting it as completed.
    shardsResponses.add(shardResponse);
    logger.trace("{}: got response from {}", actionName, shardId);
    if (responsesCountDown.countDown() == false) {
        // More shard responses are still outstanding.
        return;
    }
    // Last expected response arrived — complete the overall request.
    finishAndNotifyListener(listener, shardsResponses);
}
// NOTE(review): truncated fragment — braces are unbalanced and the enclosing method is not
// visible. What is visible: when there are seeds, a CountDown sized to seeds.size() gates the
// outer connectionListener; each remote.updateSeedNodes(...) success counts down and the last
// one completes the listener, while the failure path uses countDown.fastForward() so only the
// first failure reaches connectionListener.onFailure. Verify against the full method.
connectionListener.onResponse(null); } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); for (Map.Entry<String, Tuple<String, List<Tuple<String, Supplier<DiscoveryNode>>>>> entry : seeds.entrySet()) { remote.updateSeedNodes(proxyAddress, seedList, ActionListener.wrap( response -> { if (countDown.countDown()) { connectionListener.onResponse(response); if (countDown.fastForward()) { connectionListener.onFailure(exception);
/**
 * Counts down one completed operation and, when the final expected operation
 * has been counted, runs the {@code onFinish} callback exactly once.
 * Calling this more often than the number of expected operations trips the assertion.
 */
void countDown() {
    assert !counter.isCountedDown() : "more operations executed than specified";
    final boolean lastOperation = counter.countDown();
    if (lastOperation) {
        onFinish.run();
    }
}
// NOTE(review): truncated fragment — the method header and closing braces are outside this view.
// Visible behavior: a CountDown sized to shards.size() gates the listener; a shard routing whose
// node cannot be resolved is skipped (trace-logged) but still counted down, and the last
// count-down delivers the accumulated presyncResponses map. Confirm the non-skip path in the
// full method handles the count-down symmetrically.
final ShardId shardId, final ActionListener<Map<String, PreSyncedFlushResponse>> listener) { final CountDown countDown = new CountDown(shards.size()); final ConcurrentMap<String, PreSyncedFlushResponse> presyncResponses = ConcurrentCollections.newConcurrentMap(); for (final ShardRouting shard : shards) { if (node == null) { logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard); if (countDown.countDown()) { listener.onResponse(presyncResponses);
public void onTimeout() { if (countDown.fastForward()) { logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion); ackedTaskListener.onAckTimeout(); } } }
// NOTE(review): truncated fragment — the method body continues beyond this view. Visible
// behavior: if all expected operations are already counted down, short-circuit by answering
// with an empty successful ClearScrollResponse (succeeded=true, 0 freed) and return.
public void run() { if (expectedOps.isCountedDown()) { listener.onResponse(new ClearScrollResponse(true, 0)); return;
// Handles the cluster-state commit notification for an acked task. The commit itself consumes
// one slot of the count-down; the remaining slots are node acks. Ordering is deliberate and
// subtle: (1) a null ackTimeout is treated as ZERO; (2) time already spent committing is
// subtracted from the ack timeout; (3) if no time is left, time out immediately; (4) if the
// commit count-down is the last one, finish; (5) otherwise schedule the timeout callback and
// then re-check isCountedDown() — node acks may have completed concurrently while scheduling,
// in which case the freshly scheduled timeout is cancelled. Do not reorder these steps.
@Override public void onCommit(TimeValue commitTime) { TimeValue ackTimeout = ackedTaskListener.ackTimeout(); if (ackTimeout == null) { ackTimeout = TimeValue.ZERO; } final TimeValue timeLeft = TimeValue.timeValueNanos(Math.max(0, ackTimeout.nanos() - commitTime.nanos())); if (timeLeft.nanos() == 0L) { onTimeout(); } else if (countDown.countDown()) { finish(); } else { this.ackTimeoutCallback = threadPool.schedule(timeLeft, ThreadPool.Names.GENERIC, this::onTimeout); // re-check if onNodeAck has not completed while we were scheduling the timeout if (countDown.isCountedDown()) { FutureUtils.cancel(ackTimeoutCallback); } } }
// NOTE(review): truncated fragment — the lambda and surrounding method continue beyond this
// view. Visible behavior: a CountDown sized to totalNumRequest gates completion; per-index
// responses accumulate in a synchronized list, and the final count-down merges them when
// merge-results was requested. The non-merge branch is not visible here.
final CountDown completionCounter = new CountDown(totalNumRequest); final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>()); final Runnable onResponse = () -> { if (completionCounter.countDown()) { if (request.isMergeResults()) { listener.onResponse(merge(indexResponses));
// NOTE(review): the trailing "};" closes an enclosing anonymous class not visible here.
// fastForward() exhausts the count-down atomically, so only the first failure (or whichever
// path wins the race) propagates to the listener; later failures are dropped by design.
@Override public void onFailure(Exception e) { if (countDown.fastForward()) { listener.onFailure(e); } } };
// NOTE(review): truncated fragment — the method body continues beyond this view. Visible
// behavior: when every expected operation has already completed, respond immediately with a
// successful ClearScrollResponse reporting zero freed contexts and return early.
public void run() { if (expectedOps.isCountedDown()) { listener.onResponse(new ClearScrollResponse(true, 0)); return;
/**
 * Listener tracking the connection hand-shake of all channels opened for a node.
 * The internal count-down is sized to the number of channels, so the wrapped
 * listener fires only after every channel has reported.
 *
 * @param node              the node being connected to
 * @param connectionProfile profile describing the channels being opened
 * @param channels          the channels whose completion is awaited
 * @param listener          notified once all channels are connected
 */
private ChannelsConnectedListener(DiscoveryNode node, ConnectionProfile connectionProfile, List<TcpChannel> channels,
                                  ActionListener<Transport.Connection> listener) {
    this.countDown = new CountDown(channels.size());
    this.listener = listener;
    this.channels = channels;
    this.connectionProfile = connectionProfile;
    this.node = node;
}
// NOTE(review): the final "}" closes an enclosing (anonymous) class not visible here.
// Failure aggregation: the first failure wins the compareAndSet and becomes the primary
// exception; subsequent failures are attached as suppressed. Unlike the fast-forward pattern,
// each failure still counts down one slot, so the aggregated failure is delivered only after
// every expected operation has reported.
@Override public void onFailure(Exception e) { if (failure.compareAndSet(null, e) == false) { failure.get().addSuppressed(e); } if (countDown.countDown()) { delegate.onFailure(failure.get()); } } }
/**
 * Marks one operation as executed. When this is the final expected operation,
 * the {@code onFinish} runnable is invoked. Executing more operations than were
 * specified is a programming error, guarded by the assertion.
 */
void countDown() {
    assert !counter.isCountedDown() : "more operations executed than specified";
    if (counter.countDown() == false) {
        return;
    }
    onFinish.run();
}
// NOTE(review): truncated fragment — the loop body is cut off mid-way. Visible behavior: a
// CountDown sized to docIdsToLoad.length gates sending the final response; the last count-down
// triggers sendResponse with the reduced query phase and the collected fetch results. The code
// that actually dispatches per-doc fetches is outside this view.
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, queryResults.length()); final CountDown counter = new CountDown(docIdsToLoad.length); for (int i = 0; i < docIdsToLoad.length; i++) { final int index = i; if (counter.countDown()) { sendResponse(reducedQueryPhase, fetchResults);
// NOTE(review): truncated fragment — braces are unbalanced and the enclosing method is not
// visible. Visible behavior mirrors the other seed-update snippet in this file: one CountDown
// slot per seed entry, successes count down normally and the last completes the listener,
// while failures fast-forward so only the first failure is reported. Verify in full context.
connectionListener.onResponse(null); } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); for (Map.Entry<String, Tuple<String, List<Supplier<DiscoveryNode>>>> entry : seeds.entrySet()) { remote.updateSeedNodes(proxyAddress, seedList, ActionListener.wrap( response -> { if (countDown.countDown()) { connectionListener.onResponse(response); if (countDown.fastForward()) { connectionListener.onFailure(exception);
// NOTE(review): the trailing "});" closes an enclosing anonymous-class argument not visible
// here. fastForward() exhausts the counter atomically, guaranteeing the listener's onFailure is
// invoked at most once even when several operations fail concurrently (as the inline comment
// already notes).
@Override public void onFailure(Exception e) { if (countDown.fastForward()) { // we need to check if it's true since we could have multiple failures listener.onFailure(e); } } });
/**
 * Tracks asynchronous shard-store info fetches across a set of shards.
 * One count-down slot is allocated per shard id; fetch responses are collected
 * in a concurrent queue until all fetches have reported.
 *
 * @param nodes        current discovery nodes
 * @param routingNodes current routing table view
 * @param shardIds     shards whose store info is being fetched
 * @param listener     completed once all per-shard fetches are done
 */
AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, Set<ShardId> shardIds,
                            ActionListener<IndicesShardStoresResponse> listener) {
    this.expectedOps = new CountDown(shardIds.size());
    this.fetchResponses = new ConcurrentLinkedQueue<>();
    this.listener = listener;
    this.shardIds = shardIds;
    this.routingNodes = routingNodes;
    this.nodes = nodes;
}
@Override
public void onResponse(Function<String, DiscoveryNode> nodeLookup) {
    // Publish this cluster's node-lookup function before counting down, so the
    // combined resolver below can observe it once the last cluster reports.
    synchronized (clusterMap) {
        clusterMap.put(cluster, nodeLookup);
    }
    if (countDown.countDown() == false) {
        // Other clusters have not reported yet.
        return;
    }
    // All clusters resolved: expose a combined (clusterAlias, nodeId) resolver,
    // falling back to nullFunction for unknown aliases.
    listener.onResponse((clusterAlias, nodeId) -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId));
}
/**
 * Counts one operation as executed; the last expected count-down triggers the
 * {@code onFinish} callback. The assertion catches the bug of executing more
 * operations than were declared up front.
 */
void countDown() {
    assert counter.isCountedDown() != true : "more operations executed than specified";
    final boolean finished = counter.countDown();
    if (finished) {
        onFinish.run();
    }
}