/**
 * Checks whether a cache group is excluded from processing.
 *
 * @param grp Group.
 * @return {@code True} if the group itself or any cache it contains is listed in the exclude set.
 */
private boolean isGrpExcluded(CacheGroupContext grp) {
    if (arg.getExcludeCaches().contains(grp.name()))
        return true;

    return grp.caches().stream().anyMatch(cacheCtx -> arg.getExcludeCaches().contains(cacheCtx.name()));
}
for (GridCacheContext ctx : grpCtx.caches()) { Collection<GridQueryTypeDescriptor> types = qry.types(ctx.name());
// Names of all caches in the group, alphabetically sorted (TreeSet keeps natural order).
SortedSet<String> cacheNames = grp.caches().stream()
    .map(GridCacheContext::name)
    .collect(Collectors.toCollection(TreeSet::new));
/** * Method checks that there were no rebalance for all caches (excluding sys cache). */ private void checkNoRebalanceAfterRecovery() { int sysCacheGroupId = CU.cacheId(GridCacheUtils.UTILITY_CACHE_NAME); List<Ignite> nodes = G.allGrids(); for (final Ignite node : nodes) { TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(node); Set<Integer> mvccCaches = ((IgniteEx) node).context().cache().cacheGroups().stream() .flatMap(group -> group.caches().stream()) .filter(cache -> cache.config().getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) .map(GridCacheContext::groupId) .collect(Collectors.toSet()); List<Integer> rebalancedGroups = spi.recordedMessages(true).stream() .map(msg -> (GridDhtPartitionDemandMessage) msg) .map(GridCacheGroupIdMessage::groupId) .filter(grpId -> grpId != sysCacheGroupId) //TODO: remove following filter when failover for MVCC will be fixed. .filter(grpId -> !mvccCaches.contains(grpId)) .distinct() .collect(Collectors.toList()); Assert.assertTrue("There was unexpected rebalance for some groups" + " [node=" + node.name() + ", groups=" + rebalancedGroups + ']', rebalancedGroups.isEmpty()); } }
// Close update-counter gaps on this backup partition for the continuous queries of every cache in the group.
for (GridCacheContext ctx0 : grp.caches())
    ctx0.continuousQueries().closeBackupUpdateCountersGaps(ctx0, part.id(), topVer, gaps);
for (GridCacheContext cctx : grp.caches()) { if (cctx.statisticsEnabled()) { final CacheMetricsImpl metrics = cctx.cache().metrics0(); for (GridCacheContext cctx : grp.caches()) { if (cctx.statisticsEnabled()) { final CacheMetricsImpl metrics = cctx.cache().metrics0(); int remaining = clearingPartitions.decrementAndGet(); for (GridCacheContext cctx : grp.caches()) { if (cctx.statisticsEnabled()) { final CacheMetricsImpl metrics = cctx.cache().metrics0(); int remaining = clearingPartitions.decrementAndGet(); for (GridCacheContext cctx : grp.caches()) { if (cctx.statisticsEnabled()) { final CacheMetricsImpl metrics = cctx.cache().metrics0();
// Populate the cache-group info bean from the group context and its configuration.
ci.setGrpName(context.cacheOrGroupName());
ci.setGrpId(context.groupId());
ci.setCachesCnt(context.caches().size());
ci.setPartitions(context.config().getAffinity().partitions());
ci.setBackupsCnt(context.config().getBackups());
ci.setMode(context.config().getCacheMode());
ci.setAtomicityMode(context.config().getAtomicityMode());
// NOTE(review): assumes the group contains at least one cache — iterator().next() throws otherwise; confirm callers guarantee this.
ci.setMapped(mapped(context.caches().iterator().next().name()));
// Index every cache context of the group by its cache id.
for (GridCacheContext ctx : grpCtx.caches())
    cacheIdToCtx.put(ctx.cacheId(), ctx);
/**
 * Asserts that the static cache group contains exactly {@code CACHES_COUNT} caches and that
 * every cache context in it reports that same group.
 *
 * @param igniteEx Node to check.
 */
private void assertCaches(IgniteEx igniteEx) {
    // Look the group up once instead of twice (the original performed the same lookup for both
    // the caches collection and the group reference).
    @Nullable CacheGroupContext cacheGroup = igniteEx
        .context()
        .cache()
        .cacheGroup(CU.cacheId(STATIC_CACHE_CACHE_GROUP_NAME));

    Collection<GridCacheContext> caches = cacheGroup.caches();

    assertEquals(CACHES_COUNT, caches.size());

    // JUnit convention: expected value first, actual second.
    for (GridCacheContext cacheContext : caches)
        assertEquals(cacheGroup, cacheContext.group());
} }
// Corrupt entries in the persistent system cache group: one with a broken value, one with a broken counter.
corruptDataEntry(storedSysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("sq0", "default-ds-group"), true, false);

corruptDataEntry(storedSysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("sq" + parts / 2, "default-ds-group"), false, true);

// Same two corruption modes for the volatile (in-memory) system cache group.
corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s0", "default-volatile-ds-group"), true, false);

corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s" + parts / 2, "default-volatile-ds-group"), false, true);
boolean first = true; for (GridCacheContext cache : grp.caches()) { long start0 = U.currentTimeMillis();
GridCacheContext cctx = cache.context();

// Skip caches that are not user caches, belong to a non-MVCC group, belong to a group with
// no caches, or are already closed.
if (!cctx.userCache() || !cctx.group().mvccEnabled() || F.isEmpty(cctx.group().caches()) || cctx.shared().closed(cctx))
    continue;
// For a non-shared group, take the first cache context of the partition's group;
// if the group has no caches, return the metrics accumulated so far.
if (!shared && (cctx = F.first(part.group().caches())) == null)
    return metrics;
// Names of all caches in the group, alphabetically sorted (TreeSet keeps natural order).
SortedSet<String> cacheNames = grp.caches().stream()
    .map(GridCacheContext::name)
    .collect(Collectors.toCollection(TreeSet::new));
// Populate the cache-group info bean from the group context and its configuration.
ci.setGrpName(context.cacheOrGroupName());
ci.setGrpId(context.groupId());
ci.setCachesCnt(context.caches().size());
ci.setPartitions(context.config().getAffinity().partitions());
ci.setBackupsCnt(context.config().getBackups());
ci.setMode(context.config().getCacheMode());
ci.setAtomicityMode(context.config().getAtomicityMode());
// NOTE(review): assumes the group contains at least one cache — iterator().next() throws otherwise; confirm callers guarantee this.
ci.setMapped(mapped(context.caches().iterator().next().name()));
// Index every cache context of the group by its cache id.
for (GridCacheContext ctx : grpCtx.caches())
    cacheIdToCtx.put(ctx.cacheId(), ctx);