// Deserializes this message from the wire: parent state first, then the node.
// NOTE(review): read order must mirror the corresponding writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    node = new DiscoveryNode(in);
}
// Deserializes this message: super's fields first, then the node carried by it.
// Read order must mirror writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    node = new DiscoveryNode(in);
}
// Wire constructor: reads the node the request is targeted at, then the wrapped
// request via the caller-supplied reader. Read order must mirror writeTo.
ProxyRequest(StreamInput in, Writeable.Reader<T> reader) throws IOException {
    super(in);
    targetNode = new DiscoveryNode(in);
    wrapped = reader.read(in);
}
// Deserializes this message: super's fields first, then the node.
// Read order must mirror writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    node = new DiscoveryNode(in);
}
// Deserializes this message: the sending node, the node it believes is master,
// and its cluster name. Read order must mirror writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    sourceNode = new DiscoveryNode(in);
    masterNode = new DiscoveryNode(in);
    clusterName = new ClusterName(in);
}
// Wire constructor: whether the shard is active on the responding node, then
// the responding node itself. Read order must mirror writeTo.
ShardActiveResponse(StreamInput in) throws IOException {
    shardActive = in.readBoolean();
    node = new DiscoveryNode(in);
}
// Deserializes this message: the node being validated, the cluster name, the
// current master, and the master's cluster state version.
// Read order must mirror writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    targetNode = new DiscoveryNode(in);
    clusterName = new ClusterName(in);
    masterNode = new DiscoveryNode(in);
    clusterStateVersion = in.readLong();
}
/**
 * Temporary method that allows turning a {@link NodeView} into a {@link DiscoveryNode}. This representation will never be used in
 * practice, because in 6.4 and above a consumer of the response will only be able to retrieve a representation of {@link NodeView}
 * objects.
 *
 * Effectively this will be used to hold the state of the object in 6.x, so there is no need to have two backing objects that
 * represent the state of the Response. In practice these will always be read by a consumer as a NodeView, but it eases the
 * transition to master, which will not contain any representation of a {@link DiscoveryNode}.
 */
DiscoveryNode convertToDiscoveryNode() {
    // Only name and id carry real data; ephemeral id / host name / host address are
    // empty placeholders and the transport address is a non-routable meta address.
    return new DiscoveryNode(name, nodeId, "", "", "", new TransportAddress(TransportAddress.META_ADDRESS, 0),
        Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
}
/**
 * Creates a {@link DiscoveryNode} representing the local node, deriving its
 * name, attributes and roles from the given settings.
 */
public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeId) {
    final String nodeName = Node.NODE_NAME_SETTING.get(settings);
    final Map<String, String> nodeAttributes = Node.NODE_ATTRIBUTES.getAsMap(settings);
    final Set<Role> nodeRoles = getRolesFromSettings(settings);
    return new DiscoveryNode(nodeName, nodeId, publishAddress, nodeAttributes, nodeRoles, Version.CURRENT);
}
// Deserializes this message: the sending node followed by a length-prefixed
// array of index metadata entries. Read order must mirror writeTo.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    fromNode = new DiscoveryNode(in);
    final int indexCount = in.readVInt();
    indices = new IndexMetaData[indexCount];
    for (int idx = 0; idx < indexCount; idx++) {
        indices[idx] = IndexMetaData.readFrom(in);
    }
}
/**
 * Builds the {@link DiscoveryNode} used as a seed for a remote cluster connection.
 * In proxy mode the address is not resolved up front: a placeholder meta address is
 * used and the original host string is kept as the "server_name" attribute; otherwise
 * the seed address is resolved immediately.
 */
static DiscoveryNode buildSeedNode(String clusterName, String address, boolean proxyMode) {
    if (proxyMode == false) {
        TransportAddress resolvedAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
        return new DiscoveryNode(clusterName + "#" + resolvedAddress.toString(), resolvedAddress,
            Version.CURRENT.minimumCompatibilityVersion());
    }
    TransportAddress placeholderAddress = new TransportAddress(TransportAddress.META_ADDRESS, 0);
    String hostName = address.substring(0, indexOfPortSeparator(address));
    return new DiscoveryNode("", clusterName + "#" + address, UUIDs.randomBase64UUID(), hostName, address,
        placeholderAddress, Collections.singletonMap("server_name", hostName),
        EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT.minimumCompatibilityVersion());
}
// Wire constructor: cluster name, pinging node, optional current master, the
// responder's cluster state version, and the ping id.
// Read order must mirror writeTo — do not reorder.
PingResponse(StreamInput in) throws IOException {
    this.clusterName = new ClusterName(in);
    this.node = new DiscoveryNode(in);
    this.master = in.readOptionalWriteable(DiscoveryNode::new); // absent when no master known
    this.clusterStateVersion = in.readLong();
    this.id = in.readLong();
}
// Sends a ping to every address we know about for this round: the configured seed
// addresses plus the addresses of nodes that pinged us recently (temporal responses).
protected void sendPings(final TimeValue timeout, final PingingRound pingingRound) {
    final ClusterState lastState = contextProvider.clusterState();
    final UnicastPingRequest pingRequest = new UnicastPingRequest(pingingRound.id(), timeout, createPingResponse(lastState));

    // Addresses learned from pings we received; they must come from our own cluster.
    List<TransportAddress> temporalAddresses = temporalResponses.stream().map(pingResponse -> {
        assert clusterName.equals(pingResponse.clusterName()) :
            "got a ping request from a different cluster. expected " + clusterName + " got " + pingResponse.clusterName();
        return pingResponse.node().getAddress();
    }).collect(Collectors.toList());

    // Seed addresses first, then learned ones, de-duplicated.
    final Stream<TransportAddress> uniqueAddresses = Stream.concat(pingingRound.getSeedAddresses().stream(),
        temporalAddresses.stream()).distinct();

    // resolve what we can via the latest cluster state
    final Set<DiscoveryNode> nodesToPing = uniqueAddresses
        .map(address -> {
            DiscoveryNode foundNode = lastState.nodes().findByAddress(address);
            if (foundNode != null && transportService.nodeConnected(foundNode)) {
                // Reuse the known, already-connected node.
                return foundNode;
            } else {
                // Unknown address: build a lightweight placeholder node so we can ping it.
                // Uses the minimum compatible version since the real node version is unknown.
                return new DiscoveryNode(
                    address.toString(),
                    address,
                    emptyMap(),
                    emptySet(),
                    Version.CURRENT.minimumCompatibilityVersion());
            }
        }).collect(Collectors.toSet());

    nodesToPing.forEach(node -> sendPingRequestToNode(node, timeout, pingingRound, pingRequest));
}
// Deserializes a recovery start request. Read order must mirror writeTo — do not reorder.
@Override public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    targetAllocationId = in.readString();
    sourceNode = new DiscoveryNode(in);
    targetNode = new DiscoveryNode(in);
    metadataSnapshot = new Store.MetadataSnapshot(in);
    primaryRelocation = in.readBoolean();
    if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
        startingSeqNo = in.readLong();
    } else {
        // Older senders do not transmit a starting sequence number.
        startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
    }
}
public ClusterSearchShardsResponse(StreamInput in) throws IOException { super(in); groups = new ClusterSearchShardsGroup[in.readVInt()]; for (int i = 0; i < groups.length; i++) { groups[i] = ClusterSearchShardsGroup.readSearchShardsGroupResponse(in); } nodes = new DiscoveryNode[in.readVInt()]; for (int i = 0; i < nodes.length; i++) { nodes[i] = new DiscoveryNode(in); } if (in.getVersion().onOrAfter(Version.V_5_1_1)) { int size = in.readVInt(); indicesAndFilters = new HashMap<>(); for (int i = 0; i < size; i++) { String index = in.readString(); AliasFilter aliasFilter = new AliasFilter(in); indicesAndFilters.put(index, aliasFilter); } } else { indicesAndFilters = null; } }
// Wire constructor. Read order must mirror writeTo — do not reorder.
public NodeAllocationResult(StreamInput in) throws IOException {
    node = new DiscoveryNode(in);
    shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new);
    if (in.getVersion().before(Version.V_5_2_1)) {
        // Pre-5.2.1 senders always transmit a decision; it became optional afterwards.
        canAllocateDecision = Decision.readFrom(in);
    } else {
        canAllocateDecision = in.readOptionalWriteable(Decision::readFrom);
    }
    nodeDecision = AllocationDecision.readFrom(in);
    weightRanking = in.readVInt();
}
// Deserializes a per-node shard store status. Read order must mirror writeTo.
@Override public void readFrom(StreamInput in) throws IOException {
    node = new DiscoveryNode(in);
    if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
        // legacy version — read and discard the long older senders still transmit
        in.readLong();
    }
    allocationId = in.readOptionalString();
    allocationStatus = AllocationStatus.readFrom(in);
    if (in.readBoolean()) {
        // A boolean flag precedes the optional store exception.
        storeException = in.readException();
    }
}
private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { if (proxyAddress == null || proxyAddress.isEmpty()) { return node; } else { // resovle proxy address lazy here InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion()); } }
public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { Builder builder = new Builder(); if (in.readBoolean()) { builder.masterNodeId(in.readString()); } if (localNode != null) { builder.localNodeId(localNode.getId()); } int size = in.readVInt(); for (int i = 0; i < size; i++) { DiscoveryNode node = new DiscoveryNode(in); if (localNode != null && node.getId().equals(localNode.getId())) { // reuse the same instance of our address and local node id for faster equality node = localNode; } // some one already built this and validated it's OK, skip the n2 scans assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: " + builder.validateAdd(node); builder.putUnsafe(node); } return builder.build(); }
// Deserializes a recovery state snapshot. Read order must mirror writeTo — do not
// reorder. Synchronized because this state is updated concurrently while recovering.
@Override public synchronized void readFrom(StreamInput in) throws IOException {
    timer.readFrom(in);
    stage = Stage.fromId(in.readByte());
    shardId = ShardId.readShardId(in);
    recoverySource = RecoverySource.readFrom(in);
    targetNode = new DiscoveryNode(in);
    sourceNode = in.readOptionalWriteable(DiscoveryNode::new); // absent for non-peer recoveries
    index.readFrom(in);
    translog.readFrom(in);
    verifyIndex.readFrom(in);
    primary = in.readBoolean();
}