/**
 * @param keysMap Keys to request.
 * @return Keys request future.
 */
private IgniteInternalFuture<Object> forceRebalanceKeys(Map<Integer, Collection<KeyCacheObject>> keysMap) {
    if (F.isEmpty(keysMap))
        return null;

    GridCompoundFuture<Object, Object> compFut = null;

    IgniteInternalFuture<Object> lastForceFut = null;

    for (Map.Entry<Integer, Collection<KeyCacheObject>> entry : keysMap.entrySet()) {
        if (lastForceFut != null && compFut == null) {
            compFut = new GridCompoundFuture();

            compFut.add(lastForceFut);
        }

        int cacheId = entry.getKey();

        Collection<KeyCacheObject> keys = entry.getValue();

        GridCacheContext ctx = cctx.cacheContext(cacheId);

        lastForceFut = ctx.group().preloader().request(ctx, keys, tx.topologyVersion());

        if (compFut != null && lastForceFut != null)
            compFut.add(lastForceFut);
    }

    if (compFut != null) {
        compFut.markInitialized();

        return compFut;
    }
    else
        return lastForceFut;
}
preloader.dumpDebugInfo();
if (cctx.group().preloader().needForceKeys()) {
    GridDhtFuture<Object> fut = cctx.group().preloader().request(
        cctx,
        Collections.singleton(key),
/**
 * @param cancelled Is cancelled.
 */
private void checkIsDone(boolean cancelled) {
    if (remaining.isEmpty()) {
        sendRebalanceFinishedEvent();

        if (log.isInfoEnabled())
            log.info("Completed rebalance future: " + this);

        if (log.isDebugEnabled())
            log.debug("Partitions have been scheduled to resend [reason=" +
                "Rebalance is done [grp=" + grp.cacheOrGroupName() + "]");

        ctx.exchange().scheduleResendPartitions();

        Collection<Integer> m = new HashSet<>();

        for (Map.Entry<UUID, Collection<Integer>> e : missed.entrySet()) {
            if (e.getValue() != null && !e.getValue().isEmpty())
                m.addAll(e.getValue());
        }

        if (!m.isEmpty()) {
            U.log(log, ("Reassigning partitions that were missed: " + m));

            onDone(false); // Finished but has missed partitions, will force dummy exchange.

            ctx.exchange().forceReassign(exchId);

            return;
        }

        if (!cancelled && !grp.preloader().syncFuture().isDone())
            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

        onDone(!cancelled);
    }
}
task.part().group().preloader().rebalanceFuture().listen(f -> cleanupQueue.add(task));
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> rebalance() {
    return ctx.preloader().forceRebalance();
}
/**
 *
 */
public void onKernalStop() {
    if (!isRecoveryMode()) {
        aff.cancelFutures(new IgniteCheckedException("Failed to wait for topology update, node is stopping."));

        preldr.onKernalStop();
    }

    offheapMgr.onKernalStop();
}
/**
 * @param startTopVer Cache start version.
 * @param err Cache start error if any.
 */
void initCacheProxies(AffinityTopologyVersion startTopVer, @Nullable Throwable err) {
    for (GridCacheAdapter<?, ?> cache : caches.values()) {
        GridCacheContext<?, ?> cacheCtx = cache.context();

        if (cacheCtx.startTopologyVersion().equals(startTopVer)) {
            if (!jCacheProxies.containsKey(cacheCtx.name())) {
                IgniteCacheProxyImpl<?, ?> newProxy = new IgniteCacheProxyImpl(cache.context(), cache, false);

                if (!cache.active())
                    newProxy.suspend();

                addjCacheProxy(cacheCtx.name(), newProxy);
            }

            if (cacheCtx.preloader() != null)
                cacheCtx.preloader().onInitialExchangeComplete(err);
        }
    }
}
/**
 *
 */
public void onReconnected() {
    aff.onReconnected();

    if (top != null)
        top.onReconnected();

    preldr.onReconnected();
}
grp.preloader().onTopologyChanged(this);
/**
 * @param setId Set ID.
 * @param topVer Topology version.
 * @throws IgniteCheckedException If failed.
 */
private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) throws IgniteCheckedException {
    boolean loc = cctx.isLocal();

    GridCacheAffinityManager aff = cctx.affinity();

    if (!loc) {
        aff.affinityReadyFuture(topVer).get();

        cctx.preloader().syncFuture().get();
    }

    IgniteInternalCache<?, ?> cache = cctx.cache();

    final int BATCH_SIZE = 100;

    Collection<SetItemKey> keys = new ArrayList<>(BATCH_SIZE);

    for (Cache.Entry entry : cache.localEntries(new CachePeekMode[] {CachePeekMode.PRIMARY})) {
        Object obj = entry.getKey();

        if (!(obj instanceof SetItemKey && setId.equals(((SetItemKey)obj).setId())))
            continue;

        keys.add((SetItemKey)obj);

        if (keys.size() == BATCH_SIZE) {
            retryRemoveAll(cache, keys);

            keys.clear();
        }
    }

    if (!keys.isEmpty())
        retryRemoveAll(cache, keys);
}
/**
 * @throws Exception Exception.
 */
@Test
public void testNodeFailedAtRebalancing() throws Exception {
    IgniteEx ignite = startGrid(0);

    generateData(ignite, 0, 0);

    log.info("Preloading started.");

    startGrid(1);

    GridDhtPartitionDemander.RebalanceFuture fut = (GridDhtPartitionDemander.RebalanceFuture)grid(1).context().
        cache().internalCache(CACHE_NAME_DHT_REPLICATED).preloader().rebalanceFuture();

    fut.get();

    U.sleep(10);

    ((TestTcpDiscoverySpi)grid(1).configuration().getDiscoverySpi()).simulateNodeFailure();

    awaitPartitionMapExchange(false, false, Collections.singletonList(ignite.localNode()));

    checkSupplyContextMapIsEmpty();
}
}
/** {@inheritDoc} */
@Override public IgniteFuture<Boolean> rebalance() {
    GridCacheContext<K, V> ctx = getContextSafe();

    return new IgniteFutureImpl<>(ctx.preloader().forceRebalance());
}
/**
 *
 */
void stopGroup() {
    offheapMgr.stop();

    if (isRecoveryMode())
        return;

    IgniteCheckedException err = new IgniteCheckedException("Failed to wait for topology update, cache (or node) is stopping.");

    ctx.evict().onCacheGroupStopped(this);

    aff.cancelFutures(err);

    preldr.onKernalStop();

    ctx.io().removeCacheGroupHandlers(grpId);
}
grp.preloader().onInitialExchangeComplete(null);
GridDhtFuture<Object> fut = cctx.group().preloader().request(cctx, keys.keySet(), topVer);