/**
 * Returns the node this service is running on.
 *
 * @return the local {@link DiscoveryNode}
 */
public DiscoveryNode getLocalNode() {
    return localNode();
}
membership.sendLeaveRequestBlocking(nodes.masterNode(), nodes.localNode(), TimeValue.timeValueSeconds(1)); } catch (Exception e) { logger.debug("failed to send leave request to master [{}]", e, nodes.masterNode()); if (nodes.localNode().equals(possibleMaster)) { continue; membership.sendLeaveRequest(nodes.localNode(), possibleMaster); } catch (Exception e) { logger.debug("failed to send leave request from master [{}] to possible master [{}]", e, nodes.masterNode(), possibleMaster);
private void applyNewIndices(final ClusterChangedEvent event) { // we only create indices for shards that are allocated RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId()); if (routingNode == null) { return; } for (ShardRouting shard : routingNode) { if (!indicesService.hasIndex(shard.index())) { final IndexMetaData indexMetaData = event.state().metaData().index(shard.index()); if (logger.isDebugEnabled()) { logger.debug("[{}] creating index", indexMetaData.getIndex()); } try { indicesService.createIndex(indexMetaData.getIndex(), indexMetaData.getSettings(), event.state().nodes().localNode().id()); } catch (Throwable e) { sendFailShard(shard, indexMetaData.getIndexUUID(), "failed to create index", e); } } } }
/**
 * Returns the local node as recorded in the current cluster state.
 */
@Override
public DiscoveryNode localNode() {
    final DiscoveryNodes nodes = clusterState.getNodes();
    return nodes.localNode();
}
/**
 * Deserializes a {@link DiscoveryNodes} instance from the stream, resolving
 * the local-node marker against this node.
 *
 * @param in stream to read from
 * @return the deserialized nodes
 * @throws IOException on a stream failure
 */
@Override
public DiscoveryNodes readFrom(StreamInput in) throws IOException {
    final DiscoveryNode local = localNode();
    return readFrom(in, local);
}
/**
 * Deserializes a {@link ClusterState} from the stream, resolving the
 * local-node marker against this node.
 *
 * @param in stream to read from
 * @return the deserialized cluster state
 * @throws IOException on a stream failure
 */
@Override
public ClusterState readFrom(StreamInput in) throws IOException {
    final DiscoveryNode local = nodes.localNode();
    return readFrom(in, local);
}
/**
 * Renders a human-readable, multi-line listing of all known nodes, marking
 * which entry is the local node and which is the current master.
 *
 * NOTE(review): the comparisons use identity (==), not equals(); this relies
 * on localNode()/masterNode() returning the same instances iterated here —
 * confirm before changing to equals().
 *
 * @return a printable description of the nodes
 */
public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("nodes: \n"); for (DiscoveryNode node : this) { sb.append(" ").append(node); if (node == localNode()) { sb.append(", local"); } if (node == masterNode()) { sb.append(", master"); } sb.append("\n"); } return sb.toString(); }
/**
 * Moves shards out of {@code from} into a new list when the shard's node
 * matches the local node on the given awareness attributes.
 *
 * NOTE: this method MUTATES {@code from} — matching shards are moved into the
 * returned list, and shards whose node is no longer known are dropped entirely.
 *
 * @param key   the attribute names to compare against the local node
 * @param nodes current cluster nodes, used to resolve each shard's node
 * @param from  candidate shards; shrunk in place as shards are matched/dropped
 * @return an unmodifiable list of the shards matching the local attributes
 */
private static List<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
    final ArrayList<ShardRouting> to = new ArrayList<>();
    for (final String attribute : key.attributes) {
        // attribute value on the local node; shards on nodes sharing it are preferred
        final String localAttributeValue = nodes.localNode().attributes().get(attribute);
        if (localAttributeValue != null) {
            for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {
                ShardRouting fromShard = iterator.next();
                final DiscoveryNode discoveryNode = nodes.get(fromShard.currentNodeId());
                if (discoveryNode == null) {
                    iterator.remove(); // node is not present anymore - ignore shard
                } else if (localAttributeValue.equals(discoveryNode.attributes().get(attribute))) {
                    // shard lives on a node sharing this attribute value — move it over
                    iterator.remove();
                    to.add(fromShard);
                }
            }
        }
    }
    return Collections.unmodifiableList(to);
}
/**
 * Builds a ping response describing this node's view of the cluster.
 *
 * @param discoNodes nodes snapshot to report the local and master nodes from
 * @return a ping response carrying the local node, current master, cluster
 *         name, and whether this node has ever joined a cluster
 */
private PingResponse createPingResponse(DiscoveryNodes discoNodes) {
    final DiscoveryNode local = discoNodes.localNode();
    final DiscoveryNode master = discoNodes.masterNode();
    return new PingResponse(local, master, clusterName, contextProvider.nodeHasJoinedClusterOnce());
}
/**
 * Determines which indices this node should persist metadata for.
 *
 * @param state                    the cluster state being applied
 * @param previousState            the previously applied cluster state
 * @param previouslyWrittenIndices indices whose metadata was already written locally
 * @return the relevant indices: the locally allocated subset for a data-only
 *         node, all indices for a master-eligible node, none otherwise
 */
public static Set<String> getRelevantIndices(ClusterState state, ClusterState previousState, ImmutableSet<String> previouslyWrittenIndices) {
    final Set<String> relevantIndices;
    if (isDataOnlyNode(state)) {
        relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
    } else if (state.nodes().localNode().masterNode()) {
        // masterNode() already yields a boolean; the redundant "== true" was dropped
        relevantIndices = getRelevantIndicesForMasterEligibleNode(state);
    } else {
        relevantIndices = Collections.emptySet();
    }
    return relevantIndices;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
    // Decide whether the async fetch caches must be released and cleared.
    boolean cleanCache = false;
    DiscoveryNode localNode = event.state().nodes().localNode();
    if (localNode != null) {
        // master-eligible node that is not currently the acting master:
        // its cached fetch results are stale, drop them
        if (localNode.masterNode() == true && event.localNodeMaster() == false) {
            cleanCache = true;
        }
    } else {
        // no local node in the state yet — nothing cached can be trusted
        cleanCache = true;
    }
    if (cleanCache) {
        // release and forget both the "started shards" and "store" fetches
        Releasables.close(asyncFetchStarted.values());
        asyncFetchStarted.clear();
        Releasables.close(asyncFetchStore.values());
        asyncFetchStore.clear();
    }
}
});
/**
 * Publishes a cluster state change to every node in the new state except the
 * local node, wiring acknowledgements back to the given listener.
 *
 * @param clusterChangedEvent the state change to publish
 * @param ackListener         notified as receiving nodes acknowledge
 */
public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) {
    final DiscoveryNode self = nodesProvider.nodes().localNode();
    final Set<DiscoveryNode> targets = new HashSet<>(clusterChangedEvent.state().nodes().size());
    for (final DiscoveryNode node : clusterChangedEvent.state().nodes()) {
        // never publish the state back to ourselves
        if (!node.equals(self)) {
            targets.add(node);
        }
    }
    publish(clusterChangedEvent, targets, new AckClusterStatePublishResponseHandler(targets, ackListener));
}
/**
 * Executes an import on this node: parses the request's source into an
 * import context and runs the importer against it.
 *
 * @param request the per-node import request carrying the source definition
 * @return the import result tagged with this node's identity
 * @throws ElasticSearchException if parsing or execution fails
 */
@Override
protected NodeImportResponse nodeOperation(NodeImportRequest request) throws ElasticSearchException {
    final ImportContext importContext = new ImportContext(nodePath);
    importParser.parseSource(importContext, request.source());
    final Importer.Result importResult = importer.execute(importContext, request);
    return new NodeImportResponse(clusterService.state().nodes().localNode(), importResult);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
    if (event.nodesRemoved()) {
        synchronized (banedParents) {
            lastDiscoveryNodes = event.state().getNodes();
            // Remove all bans that were registered by nodes that are no longer in the cluster state
            Iterator<TaskId> banIterator = banedParents.keySet().iterator();
            while (banIterator.hasNext()) {
                TaskId taskId = banIterator.next();
                if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) {
                    logger.debug("Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", taskId,
                        event.state().getNodes().localNode());
                    banIterator.remove();
                }
            }
        }
        // Cancel cancellable tasks for the nodes that are gone
        // (done outside the banedParents lock; iterates a snapshot view of cancellableTasks)
        for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
            CancellableTaskHolder holder = taskEntry.getValue();
            CancellableTask task = holder.getTask();
            TaskId parentTaskId = task.getParentTaskId();
            // a task whose coordinating (parent) node left is cancelled, if it opted in
            if (parentTaskId.isSet() && lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId()) == false) {
                if (task.cancelOnParentLeaving()) {
                    holder.cancel("Coordinating node [" + parentTaskId.getNodeId() + "] left the cluster");
                }
            }
        }
    }
}
/**
 * Notifies the master that this node observed the index deletion, and — when
 * this node holds data — additionally acknowledges deletion of the index
 * store after acquiring the shard locks on a generic thread.
 *
 * @param clusterState  cluster state the deletion was observed in
 * @param index         name of the deleted index
 * @param indexSettings settings of the deleted index (needed to locate the store)
 * @param nodeId        id of this node, echoed back to the master
 */
public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) {
    final DiscoveryNodes nodes = clusterState.nodes();
    // use the local 'nodes' reference consistently instead of re-resolving clusterState.nodes()
    transportService.sendRequest(nodes.masterNode(),
            INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
    if (nodes.localNode().isDataNode() == false) {
        // only data nodes ack the store deletion; others are done after notifying the master
        logger.trace("[{}] not acking store deletion (not a data node)", index);
        return;
    }
    // acquiring the index lock may block, so run it off the calling thread
    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Throwable t) {
            logger.warn("[{}] failed to ack index store deleted for index", t, index);
        }

        @Override
        protected void doRun() throws Exception {
            lockIndexAndAck(index, nodes, nodeId, clusterState, indexSettings);
        }
    });
}
/** * Deletes the index store trying to acquire all shards locks for this index. * This method will delete the metadata for the index even if the actual shards can't be locked. */ public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { String indexName = metaData.getIndex(); if (indices.containsKey(indexName)) { String localUUid = indices.get(indexName).getIndexService().indexUUID(); throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here final IndexMetaData index = clusterState.metaData().index(indexName); throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } Index index = new Index(metaData.getIndex()); final Settings indexSettings = buildIndexSettings(metaData); deleteIndexStore(reason, index, indexSettings, closed); } }
String localNodeId = state.getNodes().localNode().id(); if (localNodeId.equals(shardRouting.currentNodeId()) || localNodeId.equals(shardRouting.relocatingNodeId())) { return false;
logger.debug("{} deleted shard reason [{}]", shardId, reason); if (clusterState.nodes().localNode().isMasterNode() == false && // master nodes keep the index meta data, even if having no shards.. canDeleteIndexContents(shardId.index(), indexSettings, false)) { if (nodeEnv.findAllShardIds(shardId.index()).isEmpty()) {
@Override
public ClusterState execute(ClusterState currentState) {
    // Take into account the previous known nodes, if they happen not to be available
    // then fault detection will remove these nodes.
    if (currentState.nodes().masterNode() != null) {
        // TODO can we tie break here? we don't have a remote master cluster state version to decide on
        logger.trace("join thread elected local node as master, but there is already a master in place: {}", currentState.nodes().masterNode());
        throw new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request");
    }

    // promote the local node to master in the node table
    DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().id());
    // update the fact that we are the master...
    // lift the no-master block now that a master exists
    ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
    currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build();

    // reroute now to remove any dead nodes (master may have stepped down when they left and didn't update the routing table)
    RoutingAllocation.Result result = routingService.getAllocationService().reroute(currentState, "nodes joined");
    if (result.changed()) {
        currentState = ClusterState.builder(currentState).routingResult(result).build();
    }

    // Add the incoming join requests.
    // Note: we only do this now (after the reroute) to avoid assigning shards to these nodes.
    return super.execute(currentState);
}