/**
 * @return {@code true} if this is an affinity node.
 */
public boolean affinityNode() {
    return cctx != null && cctx.affinityNode();
}
/**
 * @return {@code true} if this is a replicated cache and we are on a data node.
 */
public boolean isReplicatedAffinityNode() {
    return isReplicated() && affinityNode();
}
/** {@inheritDoc} */
@Override public List<GridCacheClearAllRunnable<K, V>> splitClearLocally(boolean srv, boolean near, boolean readers) {
    return ctx.affinityNode() ? super.splitClearLocally(srv, near, readers) :
        Collections.<GridCacheClearAllRunnable<K, V>>emptyList();
}
/**
 * @param nodeId Sender ID.
 * @param req Request.
 */
private void processNearUnlockRequest(UUID nodeId, GridNearUnlockRequest req) {
    assert ctx.affinityNode();
    assert nodeId != null;

    removeLocks(nodeId, req.version(), req.keys(), true);
}
/** {@inheritDoc} */
@Override public List<GridCacheClearAllRunnable<K, V>> splitClearLocally(boolean srv, boolean near, boolean readers) {
    assert configuration().getNearConfiguration() != null;

    if (ctx.affinityNode()) {
        GridCacheVersion obsoleteVer = ctx.versions().next();

        List<GridCacheClearAllRunnable<K, V>> dhtJobs = dht().splitClearLocally(srv, near, readers);

        List<GridCacheClearAllRunnable<K, V>> res = new ArrayList<>(dhtJobs.size());

        for (GridCacheClearAllRunnable<K, V> dhtJob : dhtJobs)
            res.add(new GridNearCacheClearAllRunnable<>(this, obsoleteVer, dhtJob));

        return res;
    }
    else
        return super.splitClearLocally(srv, near, readers);
}
/** */
private boolean isLocalBackup(EnlistOperation op, KeyCacheObject key) {
    if (!cctx.affinityNode() || op == EnlistOperation.LOCK)
        return false;
    else if (cctx.isReplicated())
        return true;

    // Owners list puts the primary node first, so a positive index means the local node is a backup.
    return cctx.topology().nodes(key.partition(), tx.topologyVersion()).indexOf(cctx.localNode()) > 0;
}
/** */
private boolean isLocalBackup(EnlistOperation op, KeyCacheObject key) {
    if (!cctx.affinityNode() || op == EnlistOperation.LOCK)
        return false;
    else if (cctx.isReplicated())
        return true;

    // Unlike the variant above, any owner of the partition qualifies here, primary included.
    return cctx.topology().nodes(key.partition(), tx.topologyVersion()).contains(cctx.localNode());
}
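The two isLocalBackup variants differ only in the final owner check. A minimal standalone sketch of what each check accepts (plain strings instead of ClusterNode; it assumes, as the owners list above does, that the primary owner comes first):

import java.util.Arrays;
import java.util.List;

public class OwnerCheckSketch {
    public static void main(String[] args) {
        // Owner list for a partition; by convention the primary owner comes first.
        List<String> owners = Arrays.asList("nodeA", "nodeB", "nodeC");

        String local = "nodeA"; // Local node is the primary owner here.

        // Variant 1 (indexOf > 0): true only when the local node is a non-primary owner.
        System.out.println(owners.indexOf(local) > 0); // false: local node is primary.

        // Variant 2 (contains): true when the local node is any owner, primary included.
        System.out.println(owners.contains(local));    // true.
    }
}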
/**
 * @param c Cache.
 * @param client {@code True} if it must be a client cache.
 */
private void assertClient(IgniteCache<?, ?> c, boolean client) {
    assertTrue(((IgniteCacheProxy)c).context().affinityNode() == !client);
}
/**
 * @param topVer Topology version.
 * @return Nodes where set data request should be sent.
 * @throws IgniteCheckedException If all cache nodes left grid.
 */
private Collection<ClusterNode> dataNodes(AffinityTopologyVersion topVer) throws IgniteCheckedException {
    assert ctx.isPartitioned() || collocated : "Non-collocated mode is supported only for PARTITIONED caches.";

    if (ctx.isLocal() || (ctx.isReplicated() && ctx.affinityNode()))
        return Collections.singleton(ctx.localNode());

    Collection<ClusterNode> nodes;

    if (collocated) {
        List<ClusterNode> nodes0 = ctx.affinity().nodesByPartition(hdrPart, topVer);

        // Prefer the local node if it owns the header partition, otherwise take the first owner.
        nodes = !nodes0.isEmpty() ?
            Collections.singleton(nodes0.contains(ctx.localNode()) ? ctx.localNode() : F.first(nodes0)) :
            nodes0;
    }
    else
        nodes = CU.affinityNodes(ctx, topVer);

    if (nodes.isEmpty())
        throw new IgniteCheckedException("Failed to get set data, all cache nodes left grid.");

    return nodes;
}
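The collocated branch applies a "prefer local" rule when picking the request target: the local node is used if it owns the header partition, otherwise the first owner is taken. A standalone sketch of that selection (hypothetical pickTarget helper, plain strings instead of ClusterNode):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class PreferLocalSketch {
    /** Returns a singleton with the local node if it owns the partition, otherwise the first owner. */
    static List<String> pickTarget(List<String> owners, String localNode) {
        if (owners.isEmpty())
            return owners;

        return Collections.singletonList(owners.contains(localNode) ? localNode : owners.get(0));
    }

    public static void main(String[] args) {
        System.out.println(pickTarget(Arrays.asList("a", "b"), "b")); // [b] - local node preferred.
        System.out.println(pickTarget(Arrays.asList("a", "b"), "z")); // [a] - first owner as fallback.
    }
}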
/**
 * @param cctx Cache context.
 * @throws IgniteCheckedException If failed.
 */
private void startQuery(GridCacheContext cctx) throws IgniteCheckedException {
    if (!qryIdMap.containsKey(cctx.cacheId())) {
        synchronized (this) {
            // Double-checked locking: re-test under the lock so the query is started at most once.
            if (!qryIdMap.containsKey(cctx.cacheId())) {
                qryIdMap.put(cctx.cacheId(),
                    cctx.continuousQueries().executeInternalQuery(
                        new DataStructuresEntryListener(),
                        new DataStructuresEntryFilter(),
                        cctx.isReplicated() && cctx.affinityNode(),
                        false,
                        false,
                        true
                    ));
            }
        }
    }
}
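startQuery guards the expensive executeInternalQuery call with double-checked locking so at most one internal query is started per cache ID. A standalone sketch of the same pattern (hypothetical StartOnceSketch, plain ConcurrentHashMap in place of the Ignite internals):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class StartOnceSketch {
    private final Map<Integer, Object> queries = new ConcurrentHashMap<>();

    public void startQueryOnce(int cacheId) {
        if (!queries.containsKey(cacheId)) {       // Cheap unsynchronized first check.
            synchronized (this) {
                if (!queries.containsKey(cacheId)) // Re-check under the lock.
                    queries.put(cacheId, startExpensiveQuery(cacheId));
            }
        }
    }

    private Object startExpensiveQuery(int cacheId) {
        return new Object(); // Stand-in for executeInternalQuery(...).
    }
}

ConcurrentHashMap.computeIfAbsent is a more compact alternative, though it runs the factory under the map's internal lock.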
/**
 * @param cachesToClose Caches to close.
 * @param retClientCaches {@code True} if IDs of closed client caches should be returned.
 * @return Closed client caches' IDs.
 */
Set<Integer> closeCaches(Set<String> cachesToClose, boolean retClientCaches) {
    Set<Integer> ids = null;

    for (String cacheName : cachesToClose) {
        completeProxyInitialize(cacheName);

        blockGateway(cacheName, false, false);

        GridCacheContext ctx = sharedCtx.cacheContext(CU.cacheId(cacheName));

        if (ctx == null)
            continue;

        if (retClientCaches && !ctx.affinityNode()) {
            if (ids == null)
                ids = U.newHashSet(cachesToClose.size());

            ids.add(ctx.cacheId());
        }

        closeCache(ctx);
    }

    return ids;
}
/** {@inheritDoc} */
@Override protected Collection<VisorCacheMetrics> run(final VisorCacheMetricsCollectorTaskArg arg) {
    assert arg != null;

    boolean showSysCaches = arg.isShowSystemCaches();

    Collection<String> cacheNames = arg.getCacheNames();

    assert cacheNames != null;

    GridCacheProcessor cacheProcessor = ignite.context().cache();

    Collection<IgniteCacheProxy<?, ?>> caches = cacheProcessor.jcaches();

    Collection<VisorCacheMetrics> res = new ArrayList<>(caches.size());

    boolean allCaches = cacheNames.isEmpty();

    for (IgniteCacheProxy ca : caches) {
        String cacheName = ca.getName();

        if (!VisorTaskUtils.isRestartingCache(ignite, cacheName)) {
            GridCacheContext ctx = ca.context();

            if (ctx.started() && (ctx.affinityNode() || ctx.isNear())) {
                VisorCacheMetrics cm = new VisorCacheMetrics(ignite, cacheName);

                if ((allCaches || cacheNames.contains(cacheName)) && (showSysCaches || !cm.isSystem()))
                    res.add(cm);
            }
        }
    }

    return res;
}
/**
 * Generates the next affinity key for the local node based on the current topology. If the previous affinity key
 * maps to the local node, it is returned as-is to prevent unnecessary file map growth.
 *
 * @param prevAffKey Affinity key of the previous block.
 * @return Affinity key.
 */
@SuppressWarnings("ConstantConditions")
public IgniteUuid nextAffinityKey(@Nullable IgniteUuid prevAffKey) {
    // Do not generate affinity key for non-affinity nodes.
    if (!dataCache.context().affinityNode())
        return null;

    UUID nodeId = igfsCtx.kernalContext().localNodeId();

    if (prevAffKey != null && dataCache.affinity().mapKeyToNode(prevAffKey).isLocal())
        return prevAffKey;

    while (true) {
        IgniteUuid key = new IgniteUuid(nodeId, affKeyGen.getAndIncrement());

        if (dataCache.affinity().mapKeyToNode(key).isLocal())
            return key;
    }
}
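The while loop in nextAffinityKey is a generate-and-test search: candidate keys are drawn from a monotonic counter until affinity maps one back to the local node. A standalone sketch of that idea (hypothetical nextLocalKey and mapsToLocal stand-ins for the IgniteUuid/affinity machinery):

import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;

public class AffinityKeySketch {
    private final AtomicLong keyGen = new AtomicLong();

    /** Draws candidate keys until one satisfies the locality predicate. */
    public String nextLocalKey(UUID nodeId, Predicate<String> mapsToLocal) {
        while (true) {
            String key = nodeId + "-" + keyGen.getAndIncrement();

            // Terminates only if some candidate maps to the local node; the method above
            // guarantees this by bailing out early on non-affinity nodes.
            if (mapsToLocal.test(key))
                return key;
        }
    }
}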
/** {@inheritDoc} */
@Override public boolean localPreloadPartition(int part) throws IgniteCheckedException {
    if (!ctx.affinityNode())
        return false;

    GridDhtPartitionTopology top = ctx.group().topology();

    @Nullable GridDhtLocalPartition p = top.localPartition(part, top.readyTopologyVersion(), false);

    if (p == null)
        return false;

    try {
        if (!p.reserve() || p.state() != OWNING)
            return false;

        p.dataStore().preload();
    }
    finally {
        p.release();
    }

    return true;
}
/**
 * Releases the partition that was reserved by a call to
 * {@link #reserveForFastLocalGet(int, AffinityTopologyVersion)}.
 *
 * @param part Partition to release.
 * @param topVer Topology version.
 */
public void releaseForFastLocalGet(int part, AffinityTopologyVersion topVer) {
    assert affinityNode();

    if (!isReplicated() || group().persistenceEnabled()) {
        GridDhtLocalPartition locPart = topology().localPartition(part, topVer, false);

        assert locPart != null && locPart.state() == OWNING : "partition evicted after reserveForFastLocalGet " +
            "[part=" + part + ", locPart=" + locPart + ", topVer=" + topVer + ']';

        locPart.release();
    }
}
/** {@inheritDoc} */
@Override public void destroy(boolean rmvIdx) {
    try {
        if (cctx.affinityNode() && rmvIdx) {
            assert cctx.shared().database().checkpointLockIsHeldByThread();

            for (int i = 0; i < segments.length; i++) {
                H2Tree tree = segments[i];

                tree.destroy();

                dropMetaPage(i);
            }
        }
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
    finally {
        super.destroy(rmvIdx);
    }
}
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    // Append cache name to the topic.
    topicPrefix = "CONTINUOUS_QUERY" + (cctx.name() == null ? "" : "_" + cctx.name());

    if (cctx.affinityNode()) {
        cctx.io().addCacheHandler(cctx.cacheId(), CacheContinuousQueryBatchAck.class,
            new CI2<UUID, CacheContinuousQueryBatchAck>() {
                @Override public void apply(UUID uuid, CacheContinuousQueryBatchAck msg) {
                    CacheContinuousQueryListener lsnr = lsnrs.get(msg.routineId());

                    if (lsnr != null)
                        lsnr.cleanupBackupQueue(msg.updateCntrs());
                }
            });

        cancelableTask = cctx.time().schedule(new BackupCleaner(lsnrs, cctx.kernalContext()), BACKUP_ACK_FREQ,
            BACKUP_ACK_FREQ);
    }
}
/**
 * @param cctx Cache context.
 * @throws IgniteCheckedException If failed.
 */
protected void initPendingTree(GridCacheContext cctx) throws IgniteCheckedException {
    assert !cctx.group().persistenceEnabled();

    if (cctx.affinityNode() && cctx.ttl().eagerTtlEnabled() && pendingEntries == null) {
        String name = "PendingEntries";

        long rootPage = allocateForTree();

        pendingEntries = new PendingEntriesTree(
            grp,
            name,
            grp.dataRegion().pageMemory(),
            rootPage,
            grp.reuseList(),
            true);
    }
}
/**
 * @param nodeId Node ID.
 * @param req Request.
 */
private void processNearLockRequest(UUID nodeId, GridNearLockRequest req) {
    assert ctx.affinityNode();
    assert nodeId != null;
    assert req != null;

    if (txLockMsgLog.isDebugEnabled()) {
        txLockMsgLog.debug("Received near lock request [txId=" + req.version() +
            ", inTx=" + req.inTx() +
            ", node=" + nodeId + ']');
    }

    ClusterNode nearNode = ctx.discovery().node(nodeId);

    if (nearNode == null) {
        U.warn(txLockMsgLog, "Received near lock request from unknown node (will ignore) [txId=" + req.version() +
            ", inTx=" + req.inTx() +
            ", node=" + nodeId + ']');

        return;
    }

    processNearLockRequest0(nearNode, req);
}
/**
 * Checks if local reads are allowed for the given partition and reserves the partition when needed. If this
 * method returns {@code true}, then {@link #releaseForFastLocalGet(int, AffinityTopologyVersion)} method
 * must be called after the read is completed.
 *
 * @param part Partition.
 * @param topVer Topology version.
 * @return {@code True} if cache 'get' operation is allowed to get entry locally.
 */
public boolean reserveForFastLocalGet(int part, AffinityTopologyVersion topVer) {
    boolean result = affinityNode() && rebalanceEnabled() && checkAndReservePartition(part, topVer);

    // When persistence is enabled, only reading from partitions with OWNING state is allowed.
    assert !result || !group().persistenceEnabled() ||
        topology().partitionState(localNodeId(), part) == OWNING :
        "result=" + result + ", persistenceEnabled=" + group().persistenceEnabled() +
        ", partitionState=" + topology().partitionState(localNodeId(), part) +
        ", replicated=" + isReplicated() + ", part=" + part;

    return result;
}
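Together with releaseForFastLocalGet above, this forms a reserve/read/release protocol: a local read is only legal between a successful reservation and its matching release. A hedged usage sketch under that contract (readEntryLocally and localGet are hypothetical; the reserve/release signatures are taken from the snippets above):

// Hypothetical call site; cctx exposes the reserve/release pair shown above.
@Nullable Object readEntryLocally(GridCacheContext cctx, int part, AffinityTopologyVersion topVer,
    KeyCacheObject key) {
    if (!cctx.reserveForFastLocalGet(part, topVer))
        return null; // Not safe to read locally; fall back to a distributed get (not shown).

    try {
        return localGet(cctx, key); // Hypothetical local read while the partition is pinned.
    }
    finally {
        // Must be paired with every successful reservation.
        cctx.releaseForFastLocalGet(part, topVer);
    }
}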