// Reports the local node's version as the version of this connection.
// NOTE(review): `localNode` comes from the enclosing (not visible) scope — this
// appears to be the local/loopback connection implementation, where the remote
// node is by definition ourselves; confirm against the enclosing class.
@Override public Version getVersion() { return localNode.getVersion(); } }
/**
 * Sends the pre-serialized cluster state diff that matches the target node's wire
 * version to that node. The diff must already be present in {@code serializedDiffs}
 * (it is built up-front for every version being published to).
 */
private void sendClusterStateDiff(ClusterState clusterState, Map<Version, BytesReference> serializedDiffs, Map<Version, BytesReference> serializedStates, DiscoveryNode node, TimeValue publishTimeout, SendingController sendingController) {
    final Version nodeVersion = node.getVersion();
    final BytesReference diffBytes = serializedDiffs.get(nodeVersion);
    assert diffBytes != null : "failed to find serialized diff for node " + node + " of version [" + nodeVersion + "]";
    // `true` marks this payload as a diff; `serializedStates` is passed along so the
    // receiver-side failure path can fall back to a full state if needed.
    sendClusterStateToNode(clusterState, diffBytes, node, publishTimeout, sendingController, true, serializedStates);
}
/**
 * Returns the version of the node this connection was established with.
 * Default implementation simply delegates to the connected node's own version.
 */
default Version getVersion() {
    return getNode().getVersion();
}
/**
 * Sends the full cluster state to the given node, serializing it lazily for the
 * node's wire version and caching the result in {@code serializedStates} so other
 * nodes of the same version reuse the same bytes.
 */
private void sendFullClusterState(ClusterState clusterState, Map<Version, BytesReference> serializedStates, DiscoveryNode node, TimeValue publishTimeout, SendingController sendingController) {
    BytesReference bytes = serializedStates.get(node.getVersion());
    if (bytes == null) {
        // Not serialized for this version yet — do it now and cache for reuse.
        try {
            bytes = serializeFullClusterState(clusterState, node.getVersion());
            serializedStates.put(node.getVersion(), bytes);
        } catch (Exception e) {
            // Serialization failure counts as a send failure for this node; we do
            // not abort publication to the remaining nodes.
            logger.warn(() -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
            sendingController.onNodeSendFailed(node, e);
            return;
        }
    }
    // `false` marks this payload as a full state rather than a diff.
    sendClusterStateToNode(clusterState, bytes, node, publishTimeout, sendingController, false, serializedStates);
}
/**
 * Prepends the built-in node/index compatibility validator to the supplied join
 * validators and returns the combined collection as an unmodifiable view.
 */
static Collection<BiConsumer<DiscoveryNode, ClusterState>> addBuiltInJoinValidators(
        Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators) {
    // Built-in check: the joining node's version must be compatible with the
    // cluster's nodes and with all existing index metadata.
    final BiConsumer<DiscoveryNode, ClusterState> compatibilityValidator = (joiningNode, clusterState) -> {
        MembershipAction.ensureNodesCompatibility(joiningNode.getVersion(), clusterState.getNodes());
        MembershipAction.ensureIndexCompatibility(joiningNode.getVersion(), clusterState.getMetaData());
    };
    final List<BiConsumer<DiscoveryNode, ClusterState>> combined = new ArrayList<>();
    combined.add(compatibilityValidator);
    combined.addAll(onJoinValidators);
    return Collections.unmodifiableCollection(combined);
}
/**
 * Pre-serializes, per wire version, either the full cluster state or a diff for
 * every node being published to. Full states go to nodes that are new to the
 * cluster (or when {@code sendFullVersion} forces it); all others get a diff.
 * The diff itself is computed lazily, at most once.
 */
private void buildDiffAndSerializeStates(ClusterState clusterState, ClusterState previousState, Set<DiscoveryNode> nodesToPublishTo, boolean sendFullVersion, Map<Version, BytesReference> serializedStates, Map<Version, BytesReference> serializedDiffs) {
    Diff<ClusterState> diff = null; // computed on first node that needs a diff
    for (final DiscoveryNode node : nodesToPublishTo) {
        try {
            if (sendFullVersion || !previousState.nodes().nodeExists(node)) {
                // will send a full reference
                if (serializedStates.containsKey(node.getVersion()) == false) {
                    serializedStates.put(node.getVersion(), serializeFullClusterState(clusterState, node.getVersion()));
                }
            } else {
                // will send a diff
                if (diff == null) {
                    diff = clusterState.diff(previousState);
                }
                if (serializedDiffs.containsKey(node.getVersion()) == false) {
                    serializedDiffs.put(node.getVersion(), serializeDiffClusterState(diff, node.getVersion()));
                }
            }
        } catch (IOException e) {
            // Serialization failure here aborts the whole publication attempt.
            throw new ElasticsearchException("failed to serialize cluster_state for publishing to node {}", e, node);
        }
    }
}
/**
 * Decides whether a primary shard may relocate from {@code sourceNodeId} to
 * {@code target}: allowed only when the target node's version is the same as or
 * newer than the source node's (older nodes may not read newer index formats).
 */
private Decision isVersionCompatibleRelocatePrimary(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, final RoutingAllocation allocation) {
    final RoutingNode source = routingNodes.node(sourceNodeId);
    final Version sourceVersion = source.node().getVersion();
    final Version targetVersion = target.node().getVersion();
    if (targetVersion.onOrAfter(sourceVersion) == false) {
        return allocation.decision(Decision.NO, NAME, "cannot relocate primary shard from a node with version [%s] to a node with older version [%s]", sourceVersion, targetVersion);
    }
    return allocation.decision(Decision.YES, NAME, "can relocate primary shard from a node with version [%s] to a node with equal-or-newer version [%s]", sourceVersion, targetVersion);
}
/**
 * Decides whether a replica may be allocated to {@code target} given where the
 * primary currently lives ({@code sourceNodeId}): the replica's node must be the
 * same version as or newer than the primary's node, since a newer primary may
 * already have written segments in a Lucene format an older node cannot read.
 */
private Decision isVersionCompatibleAllocatingReplica(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, final RoutingAllocation allocation) {
    final RoutingNode source = routingNodes.node(sourceNodeId);
    final Version primaryVersion = source.node().getVersion();
    final Version replicaVersion = target.node().getVersion();
    if (replicaVersion.onOrAfter(primaryVersion) == false) {
        return allocation.decision(Decision.NO, NAME, "cannot allocate replica shard to a node with version [%s] since this is older than the primary version [%s]", replicaVersion, primaryVersion);
    }
    return allocation.decision(Decision.YES, NAME, "can allocate replica shard to a node with version [%s] since this is equal-or-newer than the primary version [%s]", replicaVersion, primaryVersion);
}
/**
 * Picks the transport action name used on the master: the resize action when the
 * master is new enough to know it, otherwise the legacy shrink action.
 */
@Override protected String getMasterActionName(DiscoveryNode node) {
    if (node.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)){
        return super.getMasterActionName(node);
    } else {
        // this is for BWC - when we send this to version that doesn't have ResizeAction.NAME registered
        // we have to send to shrink instead.
        return ShrinkAction.NAME;
    }
} }
/**
 * Decides whether a shard restored from a snapshot may be allocated to
 * {@code target}: the node must be at least as new as the version that wrote
 * the snapshot.
 */
private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target, final RoutingAllocation allocation) {
    if (target.node().getVersion().onOrAfter(recoverySource.version())) {
        /* we can allocate if we can restore from a snapshot that is older or on the same version */
        return allocation.decision(Decision.YES, NAME, "node version [%s] is the same or newer than snapshot version [%s]", target.node().getVersion(), recoverySource.version());
    } else {
        return allocation.decision(Decision.NO, NAME, "node version [%s] is older than the snapshot version [%s]", target.node().getVersion(), recoverySource.version());
    }
} }
/**
 * Sends a can_match (pre-filter) request over {@code connection}. Only supported
 * against nodes on 5.6.0 or later; for older nodes we fail fast with
 * {@link IllegalArgumentException} and let the caller decide how to proceed.
 */
public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener<CanMatchResponse> listener) {
    if (connection.getNode().getVersion().onOrAfter(Version.V_5_6_0)) {
        transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new));
    } else {
        // this might look weird but if we are in a CrossClusterSearch environment we can get a connection
        // to a pre 5.latest node which is proxied by a 5.latest node under the hood since we are only compatible with 5.latest
        // instead of sending the request we shortcut it here and let the caller deal with this -- see #25704
        // also failing the request instead of returning a fake answer might trigger a retry on a replica which might be on a
        // compatible node
        throw new IllegalArgumentException("can_match is not supported on pre 5.6 nodes");
    }
}
/**
 * Sends the resync request to the replica, unless the replica is on a pre-6.0
 * node: those nodes predate sequence numbers, so instead of sending anything we
 * immediately answer on their behalf with the pre-6.0 sentinel checkpoints.
 */
@Override
protected void sendReplicaRequest(
        final ConcreteReplicaRequest<ResyncReplicationRequest> replicaRequest,
        final DiscoveryNode node,
        final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
    if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1) == false) {
        // BWC shortcut for pre-6.0 replicas: fake a response carrying the
        // sentinel checkpoint for both local and global checkpoint.
        final long legacyCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT;
        listener.onResponse(new ReplicaResponse(legacyCheckpoint, legacyCheckpoint));
        return;
    }
    super.sendReplicaRequest(replicaRequest, node, listener);
}
/**
 * Sends the replica request, unless the target is a pre-6.0 node: those nodes
 * predate sequence numbers, so we respond on their behalf with the pre-6.0
 * sentinel checkpoint instead of sending anything over the wire.
 */
@Override
protected void sendReplicaRequest(
        final ConcreteReplicaRequest<Request> replicaRequest,
        final DiscoveryNode node,
        final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
    if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
        super.sendReplicaRequest(replicaRequest, node, listener);
    } else {
        // BWC: fake a response with the sentinel value for both the local and
        // the global checkpoint.
        final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT;
        listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint));
    }
}
/**
 * Handles a join request from {@code node}: runs the on-join validators, enforces
 * the major-version barrier (once the cluster state is recovered), connects to the
 * node, asks the node to validate the join, and finally hands the request to the
 * {@code nodeJoinController}. The ordering here is deliberate — each step is a
 * progressively more expensive way to fail fast before committing to the join.
 */
void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
    if (nodeJoinController == null) {
        throw new IllegalStateException("discovery module is not yet started");
    } else {
        // we do this in a couple of places including the cluster update thread. This one here is really just best effort
        // to ensure we fail as fast as possible.
        onJoinValidators.stream().forEach(a -> a.accept(node, state));
        if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
            // Only enforce once the state is recovered; before that the min node
            // version in the state is not meaningful.
            MembershipAction.ensureMajorVersionBarrier(node.getVersion(), state.getNodes().getMinNodeVersion());
        }
        // try and connect to the node, if it fails, we can raise an exception back to the client...
        transportService.connectToNode(node);
        // validate the join request, will throw a failure if it fails, which will get back to the
        // node calling the join request
        try {
            membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
        } catch (Exception e) {
            logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
            callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
            return;
        }
        nodeJoinController.handleJoinRequest(node, callback);
    }
}
new BytesTransportRequest(bytes, node.getVersion()), stateRequestOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
/**
 * Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest}
 * to the master node. For masters on 6.1.0 or later the request is sent to the
 * local node (NOTE(review): presumably a master-node action that routes itself to
 * the current master — confirm against the action's transport handler); older
 * masters get the legacy V6 request sent to them directly. Failures are logged
 * and swallowed — this update is best effort.
 */
void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status, final DiscoveryNode masterNode) {
    try {
        if (masterNode.getVersion().onOrAfter(Version.V_6_1_0)) {
            UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status);
            transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME);
        } else {
            // BWC path for pre-6.1 masters.
            UpdateSnapshotStatusRequestV6 requestV6 = new UpdateSnapshotStatusRequestV6(snapshot, shardId, status);
            transportService.sendRequest(masterNode, UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, requestV6, INSTANCE_SAME);
        }
    } catch (Exception e) {
        logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
    }
}
/**
 * Returns {@code node} unchanged when no proxy address is configured; otherwise
 * returns a copy of the node whose transport address is replaced by the resolved
 * proxy address (all other node properties are preserved).
 */
private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) {
    if (proxyAddress == null || proxyAddress.isEmpty()) {
        return node;
    } else {
        // resolve proxy address lazily here
        InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress);
        return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node
            .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion());
    }
}
false, false, (request, channel) -> channel.sendResponse( new HandshakeResponse(localNode, clusterName, localNode.getVersion())));
if (node.node().getVersion().before(ResizeAction.COMPATIBILITY_VERSION)) { return allocation.decision(Decision.NO, NAME, "node [%s] is too old to split a shard", node.nodeId());
private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNodes discoveryNodes, TaskInfo taskInfo) { table.startRow(); String nodeId = taskInfo.getTaskId().getNodeId(); DiscoveryNode node = discoveryNodes.get(nodeId); table.addCell(taskInfo.getId()); table.addCell(taskInfo.getAction()); table.addCell(taskInfo.getTaskId().toString()); if (taskInfo.getParentTaskId().isSet()) { table.addCell(taskInfo.getParentTaskId().toString()); } else { table.addCell("-"); } table.addCell(taskInfo.getType()); table.addCell(taskInfo.getStartTime()); table.addCell(FORMATTER.format(Instant.ofEpochMilli(taskInfo.getStartTime()))); table.addCell(taskInfo.getRunningTimeNanos()); table.addCell(TimeValue.timeValueNanos(taskInfo.getRunningTimeNanos()).toString()); // Node information. Note that the node may be null because it has left the cluster between when we got this response and now. table.addCell(fullId ? nodeId : Strings.substring(nodeId, 0, 4)); table.addCell(node == null ? "-" : node.getHostAddress()); table.addCell(node.getAddress().address().getPort()); table.addCell(node == null ? "-" : node.getName()); table.addCell(node == null ? "-" : node.getVersion().toString()); if (detailed) { table.addCell(taskInfo.getDescription()); } table.endRow(); }