/** {@inheritDoc} */ @Override public boolean rebalanceRequired(AffinityTopologyVersion rebTopVer, GridDhtPartitionsExchangeFuture exchFut) { if (ctx.kernalContext().clientNode() || rebTopVer.equals(AffinityTopologyVersion.NONE)) return false; // No-op. if (exchFut.resetLostPartitionFor(grp.cacheOrGroupName())) return true; if (exchFut.localJoinExchange()) return true; // Required, can have outdated updSeq partition counter if node reconnects. if (!grp.affinity().cachedVersions().contains(rebTopVer)) { assert rebTopVer.compareTo(grp.localStartVersion()) <= 0 : "Empty hisroty allowed only for newly started cache group [rebTopVer=" + rebTopVer + ", localStartTopVer=" + grp.localStartVersion() + ']'; return true; // Required, since no history info available. } final IgniteInternalFuture<Boolean> rebFut = rebalanceFuture(); if (rebFut.isDone() && !rebFut.result()) return true; // Required, previous rebalance cancelled. AffinityTopologyVersion lastAffChangeTopVer = ctx.exchange().lastAffinityChangedTopologyVersion(exchFut.topologyVersion()); return lastAffChangeTopVer.compareTo(rebTopVer) > 0; }
/**
 * Base closure for incoming cluster messages: resolves the sender node by ID and,
 * if the sender is still alive, delegates to {@link #onMessage}. Messages from
 * nodes that have already left/failed are silently dropped (trace-logged only).
 */
private abstract class MessageHandler<M> implements IgniteBiInClosure<UUID, M> {
    /** */
    private static final long serialVersionUID = 0L;

    /**
     * @param nodeId Sender node ID.
     * @param msg Message.
     */
    @Override public void apply(UUID nodeId, M msg) {
        final ClusterNode sender = cctx.node(nodeId);
        final boolean trace = log.isTraceEnabled();

        if (sender == null) {
            // Sender left or failed between send and receive - drop the message.
            if (trace)
                log.trace("Received message from failed node [node=" + nodeId + ", msg=" + msg + ']');
        }
        else {
            if (trace)
                log.trace("Received message from node [node=" + nodeId + ", msg=" + msg + ']');

            onMessage(sender, msg);
        }
    }

    /**
     * @param node Sender cluster node.
     * @param msg Message.
     */
    protected abstract void onMessage(ClusterNode node, M msg);
}
CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId()); if (grp != null && !grp.isLocal() && grp.localStartVersion().equals(fut.initialVersion())) { assert grp.affinity().lastVersion().equals(AffinityTopologyVersion.NONE) : grp.affinity().lastVersion();
grp.localStartVersion().compareTo(entry.getValue().topologyVersion()) > 0) continue;
assert grp.localStartVersion().equals(topVer) : grp.localStartVersion();
boolean updateTop = exchId.topologyVersion().equals(grp.localStartVersion());
cacheGroup.offheap().restorePartitionStates(Collections.emptyMap()); if (cacheGroup.localStartVersion().equals(fut.initialVersion())) cacheGroup.topology().afterStateRestored(fut.initialVersion());
if (locJoin.joinTopologyVersion().equals(grp.localStartVersion())) grp.preloader().onInitialExchangeComplete(null);
/** {@inheritDoc} */ @Override public boolean rebalanceRequired(AffinityTopologyVersion rebTopVer, GridDhtPartitionsExchangeFuture exchFut) { if (ctx.kernalContext().clientNode() || rebTopVer.equals(AffinityTopologyVersion.NONE)) return false; // No-op. if (exchFut.localJoinExchange()) return true; // Required, can have outdated updSeq partition counter if node reconnects. if (!grp.affinity().cachedVersions().contains(rebTopVer)) { assert rebTopVer.compareTo(grp.localStartVersion()) <= 0 : "Empty hisroty allowed only for newly started cache group [rebTopVer=" + rebTopVer + ", localStartTopVer=" + grp.localStartVersion() + ']'; return true; // Required, since no history info available. } final IgniteInternalFuture<Boolean> rebFut = rebalanceFuture(); if (rebFut.isDone() && !rebFut.result()) return true; // Required, previous rebalance cancelled. final AffinityTopologyVersion exchTopVer = exchFut.context().events().topologyVersion(); Collection<UUID> aliveNodes = ctx.discovery().aliveServerNodes().stream() .map(ClusterNode::id) .collect(Collectors.toList()); return assignmentsChanged(rebTopVer, exchTopVer) || !aliveNodes.containsAll(demander.remainingNodes()); // Some of nodes left before rabalance compelete. }
grp.localStartVersion().compareTo(entry.getValue().topologyVersion()) > 0) continue;
if (!grp.isLocal()) { if (exchId != null) { AffinityTopologyVersion startTopVer = grp.localStartVersion();
assert grp.localStartVersion().equals(topVer) : grp.localStartVersion();
CacheGroupContext grp = cctx.cache().cacheGroup(grpId); if (grp != null && !grp.isLocal() && grp.localStartVersion().equals(fut.initialVersion())) { assert grp.affinity().lastVersion().equals(AffinityTopologyVersion.NONE) : grp.affinity().lastVersion();
boolean updateTop = exchId.topologyVersion().equals(grp.localStartVersion());
if (locJoin.joinTopologyVersion().equals(grp.localStartVersion())) grp.preloader().onInitialExchangeComplete(null);