/**
 * @return {@code True} if partition changes triggered by receiving Single/Full messages are not finished yet.
 */
public boolean partitionChangesInProgress() {
    // Read the volatile-ish coordinator reference once for a consistent check.
    ClusterNode curCrd = crd;

    if (curCrd == null)
        return false;

    boolean locNodeCrd = curCrd.equals(cctx.localNode());

    // Coordinator waits for Full map to be sent; others wait to receive it.
    return locNodeCrd ? !partitionsSent : !partitionsReceived;
}
/** {@inheritDoc} */ @Override protected boolean updateNearCache(GridCacheContext cacheCtx, KeyCacheObject key, AffinityTopologyVersion topVer) { if (!cacheCtx.isDht() || !isNearEnabled(cacheCtx) || cctx.localNodeId().equals(nearNodeId)) return false; if (cacheCtx.config().getBackups() == 0) return true; // Check if we are on the backup node. return !cacheCtx.affinity().backupsByKey(key, topVer).contains(cctx.localNode()); }
/** * @param desc Cache group descriptor. * @param aff Affinity. * @param fut Exchange future. * @return {@code True} if local node can calculate affinity on it's own for this partition map exchange. */ private boolean canCalculateAffinity(CacheGroupDescriptor desc, GridAffinityAssignmentCache aff, GridDhtPartitionsExchangeFuture fut) { assert desc != null : aff.cacheOrGroupName(); // Do not request affinity from remote nodes if affinity function is not centralized. if (!aff.centralizedAffinityFunction()) return true; // If local node did not initiate exchange or local node is the only cache node in grid. Collection<ClusterNode> affNodes = fut.events().discoveryCache().cacheGroupAffinityNodes(aff.groupId()); return fut.cacheGroupAddedOnExchange(aff.groupId(), desc.receivedFrom()) || !fut.exchangeId().nodeId().equals(cctx.localNodeId()) || (affNodes.isEmpty() || (affNodes.size() == 1 && affNodes.contains(cctx.localNode()))); }
/**
 * @param part Partition.
 * @param aff Affinity assignments.
 * @return {@code True} if given partition belongs to local node.
 */
private boolean localNode(int part, List<List<ClusterNode>> aff) {
    List<ClusterNode> partNodes = aff.get(part);

    return partNodes.contains(ctx.localNode());
}
/**
 * @param p Partition number.
 * @param topVer Topology version.
 * @return {@code True} if given partition belongs to local node.
 */
private boolean partitionLocalNode(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> partNodes = grp.affinity().nodes(p, topVer);

    return partNodes.contains(ctx.localNode());
}
/**
 * @param topVer Topology version.
 * @return {@code True} if local node is primary for this partition.
 */
public boolean primary(AffinityTopologyVersion topVer) {
    List<ClusterNode> affNodes = grp.affinity().cachedAffinity(topVer).get(id);

    // No affinity nodes — nobody is primary.
    if (affNodes.isEmpty())
        return false;

    // Primary is the first node in the affinity assignment.
    return ctx.localNode().equals(affNodes.get(0));
}
/** * @param originTx Transaction for copy. * @param req Request. * @return Prepare future. */ public IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTxLocal( final GridNearTxLocal originTx, final GridNearTxPrepareRequest req) { // Make sure not to provide Near entries to DHT cache. req.cloneEntries(); return prepareNearTx(originTx, ctx.localNode(), req); }
/**
 * @param topVer Topology version.
 * @return {@code True} if local node is backup for this partition.
 */
public boolean backup(AffinityTopologyVersion topVer) {
    List<ClusterNode> affNodes = grp.affinity().cachedAffinity(topVer).get(id);

    int locIdx = affNodes.indexOf(ctx.localNode());

    // Position 0 is the primary; any later position means the local node is a backup.
    return locIdx > 0;
}
/**
 * @param topVer Topology version.
 * @param ids IDs.
 * @return Nodes.
 */
private List<ClusterNode> toNodes(AffinityTopologyVersion topVer, List<UUID> ids) {
    List<ClusterNode> res = new ArrayList<>(ids.size());

    for (UUID id : ids) {
        ClusterNode node = cctx.discovery().node(topVer, id);

        // Every ID must resolve on the given topology; a miss indicates inconsistent discovery state.
        assert node != null : "Failed to get node [id=" + id + ", topVer=" + topVer + ", locNode=" + cctx.localNode() + ", allNodes=" + cctx.discovery().nodes(topVer) + ']';

        res.add(node);
    }

    return res;
}
/**
 * Creates a client partition topology for the given cache group.
 *
 * @param cctx Context.
 * @param discoCache Discovery data cache.
 * @param grpId Group ID.
 * @param parts Number of partitions in the group.
 * @param similarAffKey Key to find caches with similar affinity.
 */
public GridClientPartitionTopology(
    GridCacheSharedContext<?, ?> cctx,
    DiscoCache discoCache,
    int grpId,
    int parts,
    Object similarAffKey
) {
    this.cctx = cctx;
    this.discoCache = discoCache;
    this.grpId = grpId;
    this.similarAffKey = similarAffKey;
    this.parts = parts;

    // Topology version is unset until the first exchange updates it.
    topVer = AffinityTopologyVersion.NONE;

    log = cctx.logger(getClass());

    // Full map is seeded with the local node's identity and the current update sequence.
    node2part = new GridDhtPartitionFullMap(cctx.localNode().id(), cctx.localNode().order(), updateSeq.get());

    // Counters map sized to the number of partitions in the group.
    cntrMap = new CachePartitionFullCountersMap(parts);
}
/**
 * @param entry Entry.
 * @return {@code True} if local node is current primary for given entry.
 */
private boolean primaryLocal(GridCacheEntryEx entry) {
    int part = entry.partition();

    // NONE topology version means "check against the latest affinity".
    return entry.context().affinity().primaryByPartition(cctx.localNode(), part, AffinityTopologyVersion.NONE);
}
/**
 * @return {@code true} if local node is in baseline and {@code false} otherwise.
 */
private boolean isLocalNodeInBaseline() {
    BaselineTopology blt = cctx.discovery().discoCache().state().baselineTopology();

    // No baseline topology set — local node cannot be part of it.
    if (blt == null)
        return false;

    return blt.consistentIds().contains(cctx.localNode().consistentId());
}
/**
 * Marks that there are entries, enlisted by query.
 */
public void markQueryEnlisted() {
    assert mvccSnapshot != null && txState.mvccEnabled();

    // Already marked — nothing to do.
    if (qryEnlisted)
        return;

    qryEnlisted = true;

    // Server nodes register the transaction with the MVCC coordinator.
    if (!cctx.localNode().isClient())
        cctx.coordinators().registerLocalTransaction(mvccSnapshot.coordinatorVersion(), mvccSnapshot.counter());
}
/**
 * Process client cache start/close requests, called from exchange thread.
 *
 * @param msg Change request.
 */
void processClientCachesChanges(ClientCacheChangeDummyDiscoveryMessage msg) {
    AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();

    DiscoCache discoCache = cctx.discovery().discoCache(topVer);

    // Coordinator role belongs to the oldest alive server node.
    boolean locNodeCrd = cctx.localNode().equals(discoCache.oldestAliveServerNode());

    Map<Integer, Boolean> startedCaches = processClientCacheStartRequests(msg, locNodeCrd, topVer, discoCache);

    Set<Integer> closedCaches = processCacheCloseRequests(msg, locNodeCrd, topVer);

    // Nothing started or closed — no change message needed.
    if (startedCaches == null && closedCaches == null)
        return;

    scheduleClientChangeMessage(startedCaches, closedCaches);
}
/**
 * Sends {@link DynamicCacheChangeFailureMessage} to all participated nodes
 * that represents a cause of exchange failure.
 */
private void sendExchangeFailureMessage() {
    // Only the local coordinator broadcasts the failure.
    assert crd != null && crd.isLocal();

    try {
        IgniteCheckedException err = createExchangeException(exchangeGlobalExceptions);

        List<String> cacheNames = new ArrayList<>(exchActions.cacheStartRequests().size());

        for (ExchangeActions.CacheActionData actionData : exchActions.cacheStartRequests())
            cacheNames.add(actionData.request().cacheName());

        DynamicCacheChangeFailureMessage msg =
            new DynamicCacheChangeFailureMessage(cctx.localNode(), exchId, err, cacheNames);

        if (log.isDebugEnabled())
            log.debug("Dynamic cache change failed (send message to all participating nodes): " + msg);

        // Flag is set before the send so a concurrent observer sees the attempt.
        cacheChangeFailureMsgSent = true;

        cctx.discovery().sendCustomEvent(msg);
    }
    catch (IgniteCheckedException e) {
        // Either schedule a reconnect or complete the future with the failure.
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else
            onDone(e);
    }
}
/**
 * @return Initial exchange ID.
 */
private GridDhtPartitionExchangeId initialExchangeId() {
    DiscoveryEvent locJoinEvt = cctx.discovery().localJoinEvent();

    assert locJoinEvt != null;

    final AffinityTopologyVersion startTopVer = affinityTopologyVersion(locJoinEvt);

    // Join event and the derived start version must agree on the topology version.
    assert locJoinEvt.topologyVersion() == startTopVer.topologyVersion();

    return exchangeId(cctx.localNode().id(), startTopVer, locJoinEvt);
}
/** {@inheritDoc} */
@Override public GridDhtPartitionFullMap partitionMap(boolean onlyActive) {
    lock.readLock().lock();

    try {
        // No map yet, or topology is being torn down.
        if (node2part == null || stopping)
            return null;

        assert node2part.valid() : "Invalid node2part [node2part=" + node2part + ", grp=" + grp.cacheOrGroupName() + ", stopping=" + stopping + ", locNodeId=" + ctx.localNode().id() + ", locName=" + ctx.igniteInstanceName() + ']';

        GridDhtPartitionFullMap snapshot = node2part;

        // Return a defensive copy so callers cannot observe concurrent mutation.
        return new GridDhtPartitionFullMap(snapshot.nodeId(), snapshot.nodeOrder(), snapshot.updateSequence(), snapshot, onlyActive);
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * Resets affinity of every cache group to an empty assignment; invoked when no server nodes remain.
 */
private void onAllServersLeft() {
    // Only client nodes can outlive all servers.
    assert cctx.kernalContext().clientNode() : cctx.localNode();

    List<ClusterNode> noOwners = Collections.emptyList();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        int partsCnt = grp.affinity().partitions();

        List<List<ClusterNode>> affAssignment = new ArrayList<>(partsCnt);

        // Every partition gets an empty owner list.
        for (int p = 0; p < partsCnt; p++)
            affAssignment.add(noOwners);

        grp.affinity().idealAssignment(affAssignment);

        grp.affinity().initialize(initialVersion(), affAssignment);

        cctx.exchange().exchangerUpdateHeartbeat();
    }
}
/** {@inheritDoc} */
@Override public void beforeExchange(GridDhtPartitionsExchangeFuture exchFut, boolean initParts, boolean updateMoving)
    throws IgniteCheckedException {
    ClusterNode locNode = cctx.localNode();

    U.writeLock(lock);

    try {
        // Topology is being stopped — nothing to prepare.
        if (stopping)
            return;

        discoCache = exchFut.events().discoveryCache();

        beforeExchange0(locNode, exchFut);

        if (!updateMoving)
            return;

        ExchangeDiscoveryEvents evts = exchFut.context().events();

        GridAffinityAssignmentCache affCache = cctx.affinity().affinity(grpId);

        // Affinity must already be calculated for the exchange topology version.
        assert affCache.lastVersion().equals(evts.topologyVersion());

        createMovingPartitions(affCache.readyAffinity(evts.topologyVersion()));
    }
    finally {
        lock.writeLock().unlock();
    }
}
/**
 * @param oldestNode Oldest node. Target node to send message to.
 */
private void sendPartitions(ClusterNode oldestNode) {
    try {
        sendLocalPartitions(oldestNode);
    }
    catch (ClusterTopologyCheckedException ignore) {
        // Coordinator left — a new exchange will be triggered; just log it.
        if (log.isDebugEnabled()) {
            log.debug("Coordinator left during partition exchange [nodeId=" + oldestNode.id() +
                ", exchId=" + exchId + ']');
        }
    }
    catch (IgniteCheckedException e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else {
            U.error(log, "Failed to send local partitions to coordinator [crd=" + oldestNode.id() +
                ", exchId=" + exchId + ']', e);
        }
    }
}