/**
 * @param f Function applied to each page store.
 * @return Accumulated result for all page stores.
 */
public long forAllPageStores(ToLongFunction<PageStore> f) {
    long res = 0;

    for (CacheGroupContext gctx : cctx.cache().cacheGroups())
        res += forGroupPageStores(gctx, f);

    return res;
}
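// Usage sketch (a hypothetical caller; assumes PageStore#pages() returns the number of
// allocated pages in a single store): accumulate allocated pages across every page store.
long totalPages = forAllPageStores(PageStore::pages);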
/** {@inheritDoc} */
@Override public void afterLogicalUpdatesApplied(
    IgniteCacheDatabaseSharedManager mgr,
    GridCacheDatabaseSharedManager.RestoreLogicalState restoreState
) throws IgniteCheckedException {
    restorePartitionStates(cacheGroups(), restoreState.partitionRecoveryStates());
}
/**
 * Partition refresh callback.
 * On the coordinator this sends {@link GridDhtPartitionsFullMessage FullMessages},
 * on non-coordinator nodes it sends {@link GridDhtPartitionsSingleMessage SingleMessages}.
 */
public void refreshPartitions() {
    refreshPartitions(cctx.cache().cacheGroups());
}
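// Usage sketch (assumption: the caller already holds a GridCacheSharedContext 'cctx'):
// ask the exchange manager to resend partition maps outside of a regular exchange.
cctx.exchange().refreshPartitions();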
/**
 * Collects non-local cache groups.
 *
 * @return Collection of non-local cache groups.
 */
private List<CacheGroupContext> nonLocalCacheGroups() {
    return cctx.cache().cacheGroups().stream()
        .filter(grp -> !grp.isLocal() && !cacheGroupStopping(grp.groupId()))
        .collect(Collectors.toList());
}
/**
 * Creates partitions full message for all cache groups.
 *
 * @param compress {@code True} if the message can be compressed (works properly only if
 *      prepareMarshall/finishUnmarshall methods are called).
 * @param newCntrMap {@code True} if {@link CachePartitionFullCountersMap} can be used.
 * @param exchId Non-null exchange ID if message is created for exchange.
 * @param lastVer Last version.
 * @param partHistSuppliers Partition history suppliers map.
 * @param partsToReload Partitions to reload map.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(
    boolean compress,
    boolean newCntrMap,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers,
    @Nullable IgniteDhtPartitionsToReloadMap partsToReload
) {
    Collection<CacheGroupContext> grps = cctx.cache().cacheGroups();

    return createPartitionsFullMessage(compress, newCntrMap, exchId, lastVer, partHistSuppliers, partsToReload, grps);
}
/**
 * Creates partitions single message for all cache groups.
 *
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if partition update counters need to be sent.
 * @param newCntrMap {@code True} if {@link CachePartitionPartialCountersMap} can be used.
 * @param exchActions Exchange actions.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(
    @Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters,
    boolean newCntrMap,
    ExchangeActions exchActions
) {
    Collection<CacheGroupContext> grps = cctx.cache().cacheGroups();

    return createPartitionsSingleMessage(exchangeId, clientOnlyExchange, sndCounters, newCntrMap, exchActions, grps);
}
/**
 * @return Map of group ID -> set of partitions which can be used as suppliers for WAL rebalance.
 */
private Map<Integer, Set<Integer>> partitionsApplicableForWalRebalance() {
    Map<Integer, Set<Integer>> res = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        for (GridDhtLocalPartition locPart : grp.topology().currentLocalPartitions()) {
            if (locPart.state() == GridDhtPartitionState.OWNING && locPart.fullSize() > walRebalanceThreshold)
                res.computeIfAbsent(grp.groupId(), k -> new HashSet<>()).add(locPart.id());
        }
    }

    return res;
}
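// Usage sketch (hypothetical caller in the same class, assuming a 'log' field is available):
// log which groups and partitions the local node can supply via historical (WAL) rebalance.
Map<Integer, Set<Integer>> walSuppliers = partitionsApplicableForWalRebalance();

for (Map.Entry<Integer, Set<Integer>> e : walSuppliers.entrySet())
    log.info("WAL rebalance suppliers [grpId=" + e.getKey() + ", parts=" + e.getValue() + ']');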
/**
 * Dumps partitions info for all persistent cache groups;
 * see {@link #dumpPartitionsInfo(CacheGroupContext, IgniteLogger)}.
 *
 * @param cctx Shared context.
 * @param log Logger.
 * @throws IgniteCheckedException If failed.
 */
private static void dumpPartitionsInfo(GridCacheSharedContext cctx, IgniteLogger log) throws IgniteCheckedException {
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal() || !grp.persistenceEnabled())
            continue;

        dumpPartitionsInfo(grp, log);
    }
}
    @Override public Long apply() {
        long freeSpace = 0L;

        for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
            if (!grpCtx.dataRegion().config().getName().equals(dataRegName))
                continue;

            assert grpCtx.offheap() instanceof GridCacheOffheapManager;

            freeSpace += ((GridCacheOffheapManager)grpCtx.offheap()).freeSpace();
        }

        return freeSpace;
    }
};
/**
 * @param node Node.
 * @param grpName Cache group name.
 * @return Cache group.
 */
private CacheGroupContext cacheGroup(Ignite node, String grpName) {
    for (CacheGroupContext grp : ((IgniteKernal)node).context().cache().cacheGroups()) {
        if (grpName.equals(grp.name()))
            return grp;
    }

    return null;
}
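// Usage sketch in a test (the group name is hypothetical): resolve the group context on
// the first node and make sure it exists.
CacheGroupContext grp = cacheGroup(ignite(0), "testGroup");

assertNotNull("Cache group not found on node", grp);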
/** */
private void onLeft() {
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        grp.preloader().unwindUndeploys();

        cctx.exchange().exchangerUpdateHeartbeat();
    }
}
/**
 * Gets filtered cache group IDs.
 *
 * @return Set of cache group IDs, excluding system caches, local caches and explicitly excluded caches.
 */
private Set<Integer> getCacheGroupIds() {
    Collection<CacheGroupContext> groups = ignite.context().cache().cacheGroups();

    Set<Integer> grpIds = new HashSet<>();

    if (F.isEmpty(arg.getExcludeCaches())) {
        for (CacheGroupContext grp : groups) {
            if (!grp.systemCache() && !grp.isLocal())
                grpIds.add(grp.groupId());
        }

        return grpIds;
    }

    for (CacheGroupContext grp : groups) {
        if (!grp.systemCache() && !grp.isLocal() && !isGrpExcluded(grp))
            grpIds.add(grp.groupId());
    }

    return grpIds;
}
/**
 * @param cacheNames Cache names.
 */
private void resetLostPartitions(Collection<String> cacheNames) {
    assert !exchCtx.mergeExchanges();

    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;

        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (grp.isLocal())
                continue;

            for (String cacheName : cacheNames) {
                if (grp.hasCache(cacheName)) {
                    grp.topology().resetLostPartitions(initialVersion());

                    break;
                }
            }
        }
    }
}
/**
 * Checks that there was no rebalance for any cache (excluding the system cache).
 */
private void checkNoRebalanceAfterRecovery() {
    int sysCacheGroupId = CU.cacheId(GridCacheUtils.UTILITY_CACHE_NAME);

    List<Ignite> nodes = G.allGrids();

    for (final Ignite node : nodes) {
        TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(node);

        Set<Integer> mvccCaches = ((IgniteEx)node).context().cache().cacheGroups().stream()
            .flatMap(group -> group.caches().stream())
            .filter(cache -> cache.config().getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT)
            .map(GridCacheContext::groupId)
            .collect(Collectors.toSet());

        List<Integer> rebalancedGroups = spi.recordedMessages(true).stream()
            .map(msg -> (GridDhtPartitionDemandMessage)msg)
            .map(GridCacheGroupIdMessage::groupId)
            .filter(grpId -> grpId != sysCacheGroupId)
            // TODO: remove the following filter when failover for MVCC is fixed.
            .filter(grpId -> !mvccCaches.contains(grpId))
            .distinct()
            .collect(Collectors.toList());

        Assert.assertTrue("There was unexpected rebalance for some groups" +
            " [node=" + node.name() + ", groups=" + rebalancedGroups + ']', rebalancedGroups.isEmpty());
    }
}
/** */
private void onAllServersLeft() {
    assert cctx.kernalContext().clientNode() : cctx.localNode();

    List<ClusterNode> empty = Collections.emptyList();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        List<List<ClusterNode>> affAssignment = new ArrayList<>(grp.affinity().partitions());

        for (int i = 0; i < grp.affinity().partitions(); i++)
            affAssignment.add(empty);

        grp.affinity().idealAssignment(affAssignment);

        grp.affinity().initialize(initialVersion(), affAssignment);

        cctx.exchange().exchangerUpdateHeartbeat();
    }
}
/**
 * @param node Node.
 * @param cacheName Cache name.
 * @return Cache group ID for given cache name.
 */
protected static final int groupIdForCache(Ignite node, String cacheName) {
    for (CacheGroupContext grp : ((IgniteKernal)node).context().cache().cacheGroups()) {
        if (grp.hasCache(cacheName))
            return grp.groupId();
    }

    fail("Failed to find group for cache: " + cacheName);

    return 0;
}
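// Usage sketch in a test (the cache name is hypothetical): look up the group ID of a cache
// on one node and use it to read the group context on another node.
int grpId = groupIdForCache(ignite(0), "cache1");

CacheGroupContext grp = ((IgniteKernal)ignite(1)).context().cache().cacheGroup(grpId);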
/**
 * Detect lost partitions.
 *
 * @param resTopVer Result topology version.
 */
private void detectLostPartitions(AffinityTopologyVersion resTopVer) {
    boolean detected = false;

    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;

        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                // Do not trigger lost partition events on start.
                boolean event = !localJoinExchange() && !activateCluster();

                boolean detectedOnGrp = grp.topology().detectLostPartitions(resTopVer, event ? events().lastEvent() : null);

                detected |= detectedOnGrp;
            }
        }
    }

    if (detected) {
        if (log.isDebugEnabled())
            log.debug("Partitions have been scheduled to resend [reason=" +
                "Lost partitions detect on " + resTopVer + "]");

        cctx.exchange().scheduleResendPartitions();
    }

    timeBag.finishGlobalStage("Detect lost partitions");
}
/**
 * @throws IgniteCheckedException If failed.
 */
private void initTopologies() throws IgniteCheckedException {
    cctx.database().checkpointReadLock();

    try {
        if (crd != null) {
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                if (grp.isLocal())
                    continue;

                grp.topology().beforeExchange(this, !centralizedAff && !forceAffReassignment, false);

                cctx.exchange().exchangerUpdateHeartbeat();
            }
        }
    }
    finally {
        cctx.database().checkpointReadUnlock();
    }
}
/**
 * @return Total count of allocated pages in the page store.
 */
private long pageStoreAllocatedPages() {
    IgnitePageStoreManager pageStoreMgr = gridCtx.cache().context().pageStore();

    assert pageStoreMgr != null;

    long totalAllocated = pageStoreMgr.pagesAllocated(MetaStorage.METASTORAGE_CACHE_ID);

    if (MvccUtils.mvccEnabled(gridCtx))
        totalAllocated += pageStoreMgr.pagesAllocated(TxLog.TX_LOG_CACHE_ID);

    for (CacheGroupContext ctx : gridCtx.cache().cacheGroups())
        totalAllocated += pageStoreMgr.pagesAllocated(ctx.groupId());

    return totalAllocated;
}
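// Usage sketch (hypothetical test assertion): after data is loaded and a checkpoint has
// completed, the page store should report at least some allocated pages.
long allocated = pageStoreAllocatedPages();

assertTrue("Unexpected allocated pages count: " + allocated, allocated > 0);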
/** */
private void assertCachesAfterStop(IgniteEx igniteEx) {
    assertNull(igniteEx
        .context()
        .cache()
        .cacheGroup(CU.cacheId(STATIC_CACHE_CACHE_GROUP_NAME)));

    assertTrue(igniteEx.context().cache().cacheGroups().isEmpty());

    for (int i = 0; i < CACHES_COUNT; i++) {
        assertNull(igniteEx.context().cache().cache(STATIC_CACHE_PREFIX + i));

        assertNull(igniteEx.context().cache().internalCache(STATIC_CACHE_PREFIX + i));
    }
}