@Override
public void onResponse(ShardResponse shardResponse) {
    // Record this shard's result before counting down so it is visible to
    // whichever thread performs the final countdown and notifies the listener.
    shardsResponses.add(shardResponse);
    logger.trace("{}: got response from {}", actionName, shardId);
    if (responsesCountDown.countDown() == false) {
        return; // still waiting on other shards
    }
    finishAndNotifyListener(listener, shardsResponses);
}
@Override
public void onResponse(Function<String, DiscoveryNode> nodeLookup) {
    // Register this cluster's node lookup; the map is shared across concurrent callbacks.
    synchronized (clusterMap) {
        clusterMap.put(cluster, nodeLookup);
    }
    if (countDown.countDown()) {
        // All clusters have answered: hand back a combined lookup that falls
        // back to nullFunction for cluster aliases we never heard from.
        listener.onResponse((clusterAlias, nodeId) -> {
            Function<String, DiscoveryNode> lookup = clusterMap.getOrDefault(clusterAlias, nullFunction);
            return lookup.apply(nodeId);
        });
    }
}
@Override
public void onFailure(Exception e) {
    // Keep the first recorded failure as primary; later failures are attached
    // to it as suppressed exceptions.
    if (failure.compareAndSet(null, e) == false) {
        failure.get().addSuppressed(e);
    }
    if (countDown.countDown() == false) {
        return; // more operations still outstanding
    }
    delegate.onFailure(failure.get());
}
}
@Override
public void onResponse(Object o) {
    // The individual response value is ignored; only completion of all
    // outstanding operations matters.
    if (countDown.countDown() == false) {
        return;
    }
    listener.onResponse(null);
}
/**
 * Forcefully counts down one operation and runs the completion callback
 * once all expected operations have been executed.
 */
void countDown() {
    assert counter.isCountedDown() == false : "more operations executed than specified";
    final boolean allOperationsDone = counter.countDown();
    if (allOperationsDone) {
        onFinish.run();
    }
}
private void onFreedContext(boolean freed) {
    // Only contexts that were actually freed count towards the total.
    if (freed) {
        freedSearchContexts.incrementAndGet();
    }
    if (expectedOps.countDown() == false) {
        return; // more free-context operations still pending
    }
    // Completed: succeed only if no callback recorded a failure.
    final boolean succeeded = hasFailed.get() == false;
    listener.onResponse(new ClearScrollResponse(succeeded, freedSearchContexts.get()));
}
@Override
public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
    // Record this cluster's shard response; the map is shared across concurrent callbacks.
    searchShardsResponses.put(clusterName, clusterSearchShardsResponse);
    if (responsesCountDown.countDown()) {
        RemoteTransportException exception = transportException.get();
        if (exception == null) {
            listener.onResponse(searchShardsResponses);
        } else {
            // Fix: use the local snapshot instead of re-reading the atomic
            // reference — the second transportException.get() was a redundant
            // volatile read of the same value.
            listener.onFailure(exception);
        }
    }
}
private void countDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards,
                                            ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown,
                                            Map<ShardRouting, ShardSyncedFlushResponse> results) {
    // Count down this shard copy; only the final caller sends the response.
    if (countDown.countDown() == false) {
        return;
    }
    assert results.size() == shards.size();
    listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
}
@Override
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
    // Append this shard's result under its index slot before counting down.
    results.get(index).add(syncedFlushResult);
    if (countDown.countDown() == false) {
        return;
    }
    listener.onResponse(new SyncedFlushResponse(results));
}
@Override
public void handleException(TransportException exp) {
    // A failed pre-sync on one shard copy is not fatal: log it and carry on
    // with whatever responses were collected.
    logger.trace(() -> new ParameterizedMessage(
        "{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
    if (countDown.countDown() == false) {
        return;
    }
    listener.onResponse(presyncResponses);
}
@Override
protected synchronized void processAsyncFetch(List<NodeGatewayStartedShards> responses,
                                              List<FailedNodeException> failures, long fetchingRound) {
    // Collect this shard's batch of node-level responses and failures.
    fetchResponses.add(new Response(shardId, responses, failures));
    if (expectedOps.countDown() == false) {
        return; // more fetches still outstanding
    }
    finish();
}
@Override
public void handleResponse(PreSyncedFlushResponse response) {
    // Each node must answer at most once; a duplicate answer indicates a bug upstream.
    PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response);
    assert existing == null : "got two answers for node [" + node + "]";
    // count after the assert so we won't decrement twice in handleException
    if (countDown.countDown()) {
        // Last outstanding node: deliver the collected pre-sync responses.
        listener.onResponse(presyncResponses);
    }
}
// Invoked when freeing a search context on a node failed; records the failure
// and completes the request if this was the last outstanding operation.
private void onFailedFreedContext(Throwable e, DiscoveryNode node) {
    logger.warn(() -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
    /*
     * We have to set the failure marker before we count down otherwise we can expose the failure marker before we have set it to a
     * racing thread successfully freeing a context. This would lead to that thread responding that the clear scroll succeeded.
     */
    hasFailed.set(true);
    if (expectedOps.countDown()) {
        // This path only runs after a failure, so the response is always unsuccessful.
        listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get()));
    }
}
}
@Override
public void onResponse(T element) {
    // Reserve a unique slot atomically. getAndIncrement() is the idiomatic,
    // atomically-identical form of incrementAndGet() - 1.
    results.setOnce(pos.getAndIncrement(), element);
    if (countDown.countDown()) {
        if (failure.get() != null) {
            // At least one operation failed: propagate the recorded failure.
            delegate.onFailure(failure.get());
        } else {
            // All succeeded: append the defaults and hand back an immutable view.
            List<T> collect = this.results.asList();
            collect.addAll(defaults);
            delegate.onResponse(Collections.unmodifiableList(collect));
        }
    }
}
@Override
public void onFailure(Exception e) {
    // Fix: pass the exception to the logger — previously only the bare message
    // was emitted and the stack trace was silently dropped.
    logger.debug(() -> new ParameterizedMessage("{} unexpected error while executing synced flush", shardId), e);
    final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
    // Record a failed result for this shard so the aggregate response is complete.
    results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
    if (countDown.countDown()) {
        listener.onResponse(new SyncedFlushResponse(results));
    }
}
});
@Override
public void onFailure(Exception e) {
    // Wrap the raw failure so the remote cluster's name is part of the reported error.
    RemoteTransportException exception = new RemoteTransportException("error while communicating with remote cluster [" + clusterName + "]", e);
    if (transportException.compareAndSet(null, exception) == false) {
        // A failure was already recorded: fold it into this one as suppressed,
        // making the newest exception the primary.
        exception = transportException.accumulateAndGet(exception, (previous, current) -> {
            current.addSuppressed(previous);
            return current;
        });
    }
    if (responsesCountDown.countDown()) {
        // Last outstanding cluster response: fail the whole request.
        listener.onFailure(exception);
    }
}
});
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
    // Ignore acks from nodes whose acknowledgement was not requested;
    // the master node's ack is always counted.
    if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) {
        return;
    }
    if (e == null) {
        logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
    } else {
        // A failed ack still counts towards completion; remember the most recent failure.
        this.lastFailure = e;
        logger.debug(() -> new ParameterizedMessage(
            "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), e);
    }
    if (countDown.countDown()) {
        finish();
    }
}
@Override
public void onFailure(Exception e) {
    logger.trace("{}: got failure from {}", actionName, shardId);
    final int totalNumCopies = clusterState.getMetaData().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
    final ShardResponse shardResponse = newShardResponse();
    final ReplicationResponse.ShardInfo.Failure[] failures;
    if (TransportActions.isShardNotAvailableException(e)) {
        // Shard-not-available is tolerated: report zero failed copies.
        failures = new ReplicationResponse.ShardInfo.Failure[0];
    } else {
        // Any other failure is attributed to every copy of the shard.
        final ReplicationResponse.ShardInfo.Failure failure =
            new ReplicationResponse.ShardInfo.Failure(shardId, null, e, ExceptionsHelper.status(e), true);
        failures = new ReplicationResponse.ShardInfo.Failure[totalNumCopies];
        Arrays.fill(failures, failure);
    }
    shardResponse.setShardInfo(new ReplicationResponse.ShardInfo(totalNumCopies, 0, failures));
    shardsResponses.add(shardResponse);
    if (responsesCountDown.countDown() == false) {
        return; // still waiting on other shards
    }
    finishAndNotifyListener(listener, shardsResponses);
}
};
@Override
public void onCommit(TimeValue commitTime) {
    TimeValue ackTimeout = ackedTaskListener.ackTimeout();
    if (ackTimeout == null) {
        ackTimeout = TimeValue.ZERO;
    }
    // The commit itself already consumed part of the ack-timeout budget.
    final TimeValue timeLeft = TimeValue.timeValueNanos(Math.max(0, ackTimeout.nanos() - commitTime.nanos()));
    if (timeLeft.nanos() == 0L) {
        onTimeout();
    } else if (countDown.countDown()) {
        // Counting down the commit's own slot completed the countdown,
        // i.e. every expected ack had already arrived.
        finish();
    } else {
        this.ackTimeoutCallback = threadPool.schedule(timeLeft, ThreadPool.Names.GENERIC, this::onTimeout);
        // re-check if onNodeAck has not completed while we were scheduling the timeout
        if (countDown.isCountedDown()) {
            FutureUtils.cancel(ackTimeoutCallback);
        }
    }
}
@Override
protected void innerOnResponse(T result) {
    assert shardIndex == result.getShardIndex() : "shard index mismatch: " + shardIndex + " but got: " + result.getShardIndex();
    onFirstPhaseResult(shardIndex, result);
    if (counter.countDown()) {
        // All shards have responded: advance to the next search phase.
        SearchPhase phase = moveToNextPhase(clusterNodeLookup);
        try {
            phase.run();
        } catch (Exception e) {
            // we need to fail the entire request here - the entire phase just blew up
            // don't call onShardFailure or onFailure here since otherwise we'd countDown the counter
            // again which would result in an exception
            listener.onFailure(new SearchPhaseExecutionException(phase.getName(), "Phase failed", e, ShardSearchFailure.EMPTY_ARRAY));
        }
    }
}