// Predicate body: accepts only non-daemon nodes.
@Override public boolean apply(ClusterNode n) {
    return !n.isDaemon();
}
};
/** {@inheritDoc} */
// Predicate body: accepts only daemon nodes.
@Override public boolean apply(ClusterNode n) {
    return n.isDaemon();
}
}
/**
 * @param n Node.
 * @return {@code true} if node holds user information. Otherwise returns {@code false}.
 */
private static boolean isNodeHoldsUsers(ClusterNode n) {
    // Only server (non-client, non-daemon) nodes hold user information.
    return !(n.isClient() || n.isDaemon());
}
/**
 * @param node Node to check.
 * @return {@code True} if client cache is present on the given nodes.
 */
public boolean clientNode(ClusterNode node) {
    // Daemon nodes never host caches.
    if (node.isDaemon())
        return false;

    // A non-null FALSE flag marks a (non-near) client cache node;
    // null means the cache is not configured on the node at all.
    return Boolean.FALSE.equals(cacheClientNode(node));
}
/**
 * @param node Node to check.
 * @return {@code True} if cache is accessible on the given node.
 */
boolean cacheNode(ClusterNode node) {
    // Daemon nodes never participate in caches.
    if (node.isDaemon())
        return false;

    // Affinity (server) node for this cache.
    if (CU.affinityNode(node, aff.cacheFilter))
        return true;

    // Otherwise accessible only if the node has the cache in client mode.
    return cacheClientNode(node) != null;
}
/**
 * @param node Node.
 * @param filter Node filter.
 * @return {@code True} if node is not client node and pass given filter.
 */
public static boolean affinityNode(ClusterNode node, IgnitePredicate<ClusterNode> filter) {
    // Daemon and client nodes are rejected before the filter is consulted
    // (preserves short-circuit: filter is not invoked for such nodes).
    if (node.isDaemon() || node.isClient())
        return false;

    return filter.apply(node);
}
/** {@inheritDoc} */
@Override public Collection<ClusterNode> remoteDaemonNodes() {
    // Collect the daemon subset of the known remote nodes.
    final Collection<ClusterNode> res = new ArrayList<>();

    rmtNodes.forEach(rmt -> {
        if (rmt.isDaemon())
            res.add(rmt);
    });

    return res;
}
/**
 * Schedules processing of a full partition map message once this exchange future is initialized.
 *
 * @param node Sender node.
 * @param msg Full partition info.
 */
public void onReceiveFullMessage(final ClusterNode node, final GridDhtPartitionsFullMessage msg) {
    assert msg != null;
    assert msg.exchangeId() != null : msg;
    assert !node.isDaemon() : node; // Daemon nodes do not take part in partition exchange.

    // Defer processing until init completes; the listener may run on a different thread.
    initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
        @Override public void apply(IgniteInternalFuture<Boolean> f) {
            try {
                // Init future resolved to 'false' — exchange was not initialized; skip.
                if (!f.get())
                    return;
            }
            catch (IgniteCheckedException e) {
                // NOTE(review): 'this' here is the anonymous CI1 instance, not the enclosing
                // exchange future — the log message presumably intended the latter; confirm.
                U.error(log, "Failed to initialize exchange future: " + this, e);

                return;
            }

            processFullMessage(true, node, msg);
        }
    });
}
/**
 * Short node representation.
 *
 * @param n Grid node.
 * @return Short string representing the node.
 */
public static String toShortString(ClusterNode n) {
    // Produces: ClusterNode [id=..., order=..., addr=..., daemon=...]
    StringBuilder sb = new StringBuilder("ClusterNode [id=");

    sb.append(n.id())
        .append(", order=").append(n.order())
        .append(", addr=").append(n.addresses())
        .append(", daemon=").append(n.isDaemon())
        .append(']');

    return sb.toString();
}
/** {@inheritDoc} */
@Override public int getTotalBaselineNodes() {
    // Client and daemon nodes are never part of the baseline topology.
    if (node.isClient() || node.isDaemon())
        return 0;

    List<? extends BaselineNode> bltNodes = discoMgr.baselineNodes(discoMgr.topologyVersionEx());

    if (bltNodes == null)
        return 0;

    // Report 1 when the local node's consistent ID is present in the baseline.
    for (BaselineNode bltNode : bltNodes) {
        if (bltNode.consistentId().equals(node.consistentId()))
            return 1;
    }

    return 0;
}
@Override public Collection<ClusterNode> remoteDaemonNodes() {
    final Collection<ClusterNode> all = ctx.discovery().daemonNodes();

    // NOTE(review): when the local node IS a daemon the result is re-filtered to daemon
    // nodes only; on a collection already produced by daemonNodes() that filter looks
    // redundant (or the condition inverted) — verify daemonNodes() semantics in discovery.
    return !localNode().isDaemon() ? all :
        F.view(all, new IgnitePredicate<ClusterNode>() {
            @Override public boolean apply(ClusterNode n) {
                return n.isDaemon();
            }
        });
}
/** {@inheritDoc} */
@Override public boolean isDaemon() {
    ClusterNode loc = localNode();

    // Once the local node exists, its flag is authoritative.
    if (loc != null)
        return loc.isDaemon();

    // Before the local node is created, fall back to configuration or the system property.
    return config().isDaemon() || IgniteSystemProperties.getBoolean(IGNITE_DAEMON);
}
/**
 * Gets alive server nodes from disco cache for provided AffinityTopologyVersion.
 *
 * @param topVer Topology version.
 * @return Collection of nodes with at least one cache configured.
 */
private Collection<ClusterNode> aliveNodesForTopologyVer(AffinityTopologyVersion topVer) {
    // NONE is a shared constant, so identity comparison is sufficient.
    if (topVer == AffinityTopologyVersion.NONE)
        return discovery.aliveServerNodes();

    Collection<ClusterNode> histNodes = discovery.topology(topVer.topologyVersion());

    // Requested version fell out of the bounded discovery history.
    if (histNodes == null)
        throw new IgniteException("Topology " + topVer + " not found in discovery history " +
            "; consider increasing IGNITE_DISCOVERY_HISTORY_SIZE property. Current value is " +
            IgniteSystemProperties.getInteger(IgniteSystemProperties.IGNITE_DISCOVERY_HISTORY_SIZE, -1));

    // Keep only server nodes that are still alive.
    return histNodes.stream()
        .filter(n -> !n.isClient() && !n.isDaemon() && discovery.alive(n))
        .collect(Collectors.toList());
}
/**
 * Schedules processing of a single partition map request once this future is initialized.
 *
 * @param node Sender node.
 * @param msg Single partition map request (note: parameter type is
 *        {@code GridDhtPartitionsSingleRequest}, not a full-map message).
 */
public void onReceivePartitionRequest(final ClusterNode node, final GridDhtPartitionsSingleRequest msg) {
    // On a client node such a request is only valid when restoring state.
    assert !cctx.kernalContext().clientNode() || msg.restoreState();
    // Requests may only originate from server nodes.
    assert !node.isDaemon() && !node.isClient() : node;

    // Defer processing until the exchange future is initialized.
    initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
        @Override public void apply(IgniteInternalFuture<Boolean> fut) {
            processSinglePartitionRequest(node, msg);
        }
    });
}
/** {@inheritDoc} */
@Override public boolean isNodeInBaseline() {
    ClusterNode locNode = localNode();

    // Clients and daemons are never part of the baseline.
    if (locNode.isClient() || locNode.isDaemon())
        return false;

    DiscoveryDataClusterState state = ctx.state().clusterState();

    if (!state.hasBaselineTopology())
        return false;

    return CU.baselineNode(locNode, state);
}
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
    // Regular server node.
    ignite = startGrid(0);

    // Flip the flag before starting the second grid so it comes up in daemon mode
    // (presumably read by getConfiguration() — confirm against the rest of this test class).
    daemonNode = true;

    daemon = startGrid(1);

    // Sanity check: the second node must actually be a daemon.
    assert ((IgniteKernal)daemon).localNode().isDaemon();
}
/**
 * IMPORTANT!
 * Only purpose of this constructor is creating node which contains necessary data to store on disc only
 *
 * @param node to copy data from
 */
public TcpDiscoveryNode(
    ClusterNode node
) {
    // Copy only the identity/topology fields needed for on-disk persistence.
    this.id = node.id();
    this.consistentId = node.consistentId();
    this.addrs = node.addresses();
    this.hostNames = node.hostNames();
    this.order = node.order();
    this.ver = node.version();
    this.daemon = node.isDaemon();

    // For client nodes the router ID is the node's own ID; servers have none.
    this.clientRouterNodeId = node.isClient() ? node.id() : null;

    // Only the consistent ID attribute is preserved for the stored copy.
    attrs = Collections.singletonMap(ATTR_NODE_CONSISTENT_ID, consistentId);
}
}
/**
 * @param de Discovery event.
 * @param type Event's type.
 * @param id Event id.
 * @param name Event name.
 * @param nid Event node ID.
 * @param ts Event timestamp.
 * @param msg Event message.
 * @param shortDisplay Shortened version of {@code toString()} result.
 * @return Visor data transfer object for event.
 */
protected VisorGridEvent discoveryEvent(DiscoveryEvent de, int type, IgniteUuid id, String name,
    UUID nid, long ts, String msg, String shortDisplay) {
    ClusterNode evtNode = de.eventNode();

    // Pull out the event-node fields carried by the DTO.
    UUID evtNodeId = evtNode.id();
    String addr = F.first(evtNode.addresses());
    boolean daemon = evtNode.isDaemon();

    return new VisorGridDiscoveryEvent(type, id, name, nid, ts, msg, shortDisplay,
        evtNodeId, addr, daemon, de.topologyVersion());
}
/**
 * @param rmt Remote node to check.
 * @throws IgniteCheckedException If check failed.
 */
private void checkRebalanceConfiguration(ClusterNode rmt) throws IgniteCheckedException {
    ClusterNode locNode = ctx.discovery().localNode();

    // The check only applies between two server nodes.
    if (ctx.config().isClientMode() || locNode.isDaemon() || rmt.isClient() || rmt.isDaemon())
        return;

    Integer rmtPoolSize = rmt.attribute(IgniteNodeAttributes.ATTR_REBALANCE_POOL_SIZE);

    // Attribute absent — nothing to compare against.
    if (rmtPoolSize == null)
        return;

    int locPoolSize = ctx.config().getRebalanceThreadPoolSize();

    if (rmtPoolSize == locPoolSize)
        return;

    throw new IgniteCheckedException("Rebalance configuration mismatch (fix configuration or set -D" +
        IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK + "=true system property)." +
        " Different values of such parameter may lead to rebalance process instability and hanging. " +
        " [rmtNodeId=" + rmt.id() +
        ", locRebalanceThreadPoolSize = " + locPoolSize +
        ", rmtRebalanceThreadPoolSize = " + rmtPoolSize + "]");
}
/** {@inheritDoc} */
@Override public void addNode(ClusterNode node) {
    // Skip nulls and nodes whose info was already pushed to the platform side.
    if (node == null || sentNodes.contains(node.id()))
        return;

    // Send node info to the native platform
    try (PlatformMemory mem0 = mem.allocate()) {
        PlatformOutputStream out = mem0.output();

        BinaryRawWriterEx w = writer(out);

        // NOTE(review): write order appears to be a wire contract with the platform-side
        // reader — do not reorder these without checking the native deserializer.
        w.writeUuid(node.id());
        PlatformUtils.writeNodeAttributes(w, node.attributes());
        w.writeCollection(node.addresses());
        w.writeCollection(node.hostNames());
        w.writeLong(node.order());
        w.writeBoolean(node.isLocal());
        w.writeBoolean(node.isDaemon());
        w.writeBoolean(node.isClient());
        w.writeObjectDetached(node.consistentId());

        writeClusterMetrics(w, node.metrics());

        // Flush the stream before handing the memory pointer to the native side.
        out.synchronize();

        gateway().nodeInfo(mem0.pointer());
    }

    // Mark as sent only after a successful write.
    sentNodes.add(node.id());
}