/**
 * @param cancelled Is cancelled.
 */
private void checkIsDone(boolean cancelled) {
    if (remaining.isEmpty()) {
        sendRebalanceFinishedEvent();

        if (log.isInfoEnabled())
            log.info("Completed rebalance future: " + this);

        if (log.isDebugEnabled())
            log.debug("Partitions have been scheduled to resend [reason=" +
                "Rebalance is done [grp=" + grp.cacheOrGroupName() + "]]");

        ctx.exchange().scheduleResendPartitions();

        Collection<Integer> m = new HashSet<>();

        for (Map.Entry<UUID, Collection<Integer>> e : missed.entrySet()) {
            if (e.getValue() != null && !e.getValue().isEmpty())
                m.addAll(e.getValue());
        }

        if (!m.isEmpty()) {
            U.log(log, "Reassigning partitions that were missed: " + m);

            onDone(false); // Finished but has missed partitions, will force dummy exchange.

            ctx.exchange().forceReassign(exchId);

            return;
        }

        if (!cancelled && !grp.preloader().syncFuture().isDone())
            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

        onDone(!cancelled);
    }
}
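// All of the call sites in this listing funnel into the same debounced resend. The sketch
// below is a hypothetical illustration of that debounce pattern, NOT Ignite's actual
// GridCachePartitionExchangeManager code: the ResendScheduler class, its timeoutMs field
// and the resendPartitions() body are assumptions made for illustration only.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/** Hypothetical sketch: coalesces repeated resend requests into one send per timeout window. */
public class ResendScheduler {
    /** Single-threaded timer that runs the delayed resend. */
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    /** Currently pending resend task, if any. */
    private ScheduledFuture<?> pending;

    /** Debounce window in milliseconds (illustrative value, not Ignite's default). */
    private final long timeoutMs;

    public ResendScheduler(long timeoutMs) {
        this.timeoutMs = timeoutMs;
    }

    /** Schedules a partition map resend unless one is already pending in the current window. */
    public synchronized void scheduleResendPartitions() {
        if (pending == null || pending.isDone())
            pending = timer.schedule(this::resendPartitions, timeoutMs, TimeUnit.MILLISECONDS);
    }

    /** Placeholder for the actual full partition map send. */
    private void resendPartitions() {
        System.out.println("Resending full partition map");
    }
}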
"Evictions are done [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();
/**
 * Detect lost partitions.
 *
 * @param resTopVer Result topology version.
 */
private void detectLostPartitions(AffinityTopologyVersion resTopVer) {
    boolean detected = false;

    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;

        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                // Do not trigger lost partition events on start.
                boolean event = !localJoinExchange() && !activateCluster();

                boolean detectedOnGrp = grp.topology().detectLostPartitions(
                    resTopVer, event ? events().lastEvent() : null);

                detected |= detectedOnGrp;
            }
        }
    }

    if (detected) {
        if (log.isDebugEnabled())
            log.debug("Partitions have been scheduled to resend [reason=" +
                "Lost partitions detected on " + resTopVer + "]");

        cctx.exchange().scheduleResendPartitions();
    }

    timeBag.finishGlobalStage("Detect lost partitions");
}
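// For context, "lost" here means a partition that no node owns after the topology change.
// Below is a minimal, self-contained sketch of that check, assuming a plain owners map;
// the LostPartitionDetector class and its owners parameter are illustrative, not Ignite API.
import java.util.List;
import java.util.Map;

/** Hypothetical sketch: a partition counts as lost when its owner list is empty. */
public class LostPartitionDetector {
    /**
     * @param owners Map from partition id to ids of the nodes currently owning it.
     * @return Whether at least one lost partition was detected.
     */
    public static boolean detectLostPartitions(Map<Integer, List<String>> owners) {
        boolean detected = false;

        for (Map.Entry<Integer, List<String>> e : owners.entrySet()) {
            if (e.getValue().isEmpty()) {
                System.out.println("Partition lost: " + e.getKey());

                detected = true;
            }
        }

        return detected;
    }
}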
/**
 * Resends partitions on partition evict within configured timeout.
 *
 * @param part Evicted partition.
 * @param updateSeq Update sequence.
 */
public void onPartitionEvicted(GridDhtLocalPartition part, boolean updateSeq) {
    if (!enterBusy())
        return;

    try {
        top.onEvicted(part, updateSeq);

        if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_UNLOADED))
            grp.addUnloadEvent(part.id());

        if (updateSeq) {
            if (log.isDebugEnabled())
                log.debug("Partitions have been scheduled to resend [reason=" +
                    "Eviction [grp=" + grp.cacheOrGroupName() + ", part=" + part.id() + "]]");

            ctx.exchange().scheduleResendPartitions();
        }
    }
    finally {
        leaveBusy();
    }
}
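// The enterBusy()/leaveBusy() pair above guards the operation against racing with component
// stop. Below is a minimal sketch of that guard's semantics, assuming a read-write lock; the
// BusyGuard class is illustrative and is not Ignite's actual GridSpinBusyLock implementation.
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Hypothetical sketch: operations take the read side, stop takes the write side once. */
public class BusyGuard {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    private volatile boolean stopped;

    /** @return {@code false} if the component is stopping, so callers bail out early. */
    public boolean enterBusy() {
        if (stopped)
            return false;

        lock.readLock().lock();

        if (stopped) {
            lock.readLock().unlock();

            return false;
        }

        return true;
    }

    /** Releases the guard taken by a successful {@link #enterBusy()}. */
    public void leaveBusy() {
        lock.readLock().unlock();
    }

    /** Blocks new operations and waits for in-flight ones to finish. */
    public void block() {
        stopped = true;

        lock.writeLock().lock();
        lock.writeLock().unlock();
    }
}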
log.debug("Partitions have been scheduled to resend [reason=Single update from " + node.id() + "]"); scheduleResendPartitions();
"Full map update [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();
"Single map update [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();
log.debug("Schedule resend parititions due to snapshot in progress"); scheduleResendPartitions();
ignite.context().cache().context().exchange().scheduleResendPartitions();
/**
 * Detect lost partitions.
 *
 * @param resTopVer Result topology version.
 */
private void detectLostPartitions(AffinityTopologyVersion resTopVer) {
    boolean detected = false;

    long time = System.currentTimeMillis();

    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;

        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                boolean detectedOnGrp = grp.topology().detectLostPartitions(resTopVer, events().lastEvent());

                detected |= detectedOnGrp;
            }
        }
    }

    if (detected) {
        if (log.isDebugEnabled())
            log.debug("Partitions have been scheduled to resend [reason=" +
                "Lost partitions detected on " + resTopVer + "]");

        cctx.exchange().scheduleResendPartitions();
    }

    if (log.isInfoEnabled())
        log.info("Detecting lost partitions performed in " + (System.currentTimeMillis() - time) + " ms.");
}
"Evictions are done [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();
/** * Resends partitions on partition evict within configured timeout. * * @param part Evicted partition. * @param updateSeq Update sequence. */ public void onPartitionEvicted(GridDhtLocalPartition part, boolean updateSeq) { if (!enterBusy()) return; try { top.onEvicted(part, updateSeq); if (grp.eventRecordable(EVT_CACHE_REBALANCE_PART_UNLOADED)) grp.addUnloadEvent(part.id()); if (updateSeq) { if (log.isDebugEnabled()) log.debug("Partitions have been scheduled to resend [reason=" + "Eviction [grp" + grp.cacheOrGroupName() + " " + part.id() + "]"); ctx.exchange().scheduleResendPartitions(); } } finally { leaveBusy(); } }
"Single map update [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();
log.debug("Partitions have been scheduled to resend [reason=Single update from " + node.id() + "]"); scheduleResendPartitions();
"Full map update [grp" + grp.cacheOrGroupName() + "]"); ctx.exchange().scheduleResendPartitions();