/**
 * Starts processing: registers this future with the MVCC manager, sends the relevant
 * partition update counters to each sibling backup node (skipping nodes for which the
 * filtered counter list is empty), and marks the compound future as initialized.
 * <p>
 * For each peer a {@code MiniFuture} is created; on a successful send it is added to this
 * compound future, while on send failure it is completed immediately. A
 * {@code ClusterTopologyCheckedException} (node left) is logged via {@code logNodeLeft};
 * any other {@code IgniteCheckedException} is logged as a warning.
 */
public void init() {
    if (log.isInfoEnabled()) {
        // Fixed message typo ("countres") and the unterminated ']' bracket.
        log.info("Starting delivery partition counters to remote nodes [txId=" + tx.nearXidVersion() +
            ", futId=" + futId + ']');
    }

    HashSet<UUID> siblings = siblingBackups();

    cctx.mvcc().addFuture(this, futId);

    for (UUID peer : siblings) {
        // Only the counters relevant to this particular backup node are sent.
        List<PartitionUpdateCountersMessage> cntrs = cctx.tm().txHandler()
            .filterUpdateCountersForBackupNode(tx, cctx.node(peer));

        if (F.isEmpty(cntrs))
            continue;

        MiniFuture miniFut = new MiniFuture(peer);

        try {
            // NOTE(review): miniFut is added only after a successful send — confirm a response
            // arriving before add() completes is handled by the compound future machinery.
            cctx.io().send(peer, new PartitionCountersNeighborcastRequest(cntrs, futId), SYSTEM_POOL);

            add(miniFut);
        }
        catch (IgniteCheckedException e) {
            if (!(e instanceof ClusterTopologyCheckedException))
                log.warning("Failed to send partition counters to remote node [node=" + peer + ']', e);
            else
                logNodeLeft(peer);

            // Complete the mini future so this compound future can still finish.
            miniFut.onDone();
        }
    }

    markInitialized();
}
/**
 * Starts processing: registers this future with the MVCC manager, sends the relevant
 * partition update counters to each sibling backup node (skipping nodes for which the
 * filtered counter list is empty), and marks the compound future as initialized.
 * <p>
 * For each peer a {@code MiniFuture} is created; on a successful send it is added to this
 * compound future, while on send failure it is completed immediately. A
 * {@code ClusterTopologyCheckedException} (node left) is logged via {@code logNodeLeft};
 * any other {@code IgniteCheckedException} is logged as a warning.
 */
public void init() {
    if (log.isInfoEnabled()) {
        // Fixed message typo ("countres") and the unterminated ']' bracket.
        log.info("Starting delivery partition counters to remote nodes [txId=" + tx.nearXidVersion() +
            ", futId=" + futId + ']');
    }

    HashSet<UUID> siblings = siblingBackups();

    cctx.mvcc().addFuture(this, futId);

    for (UUID peer : siblings) {
        // Only the counters relevant to this particular backup node are sent.
        List<PartitionUpdateCountersMessage> cntrs = cctx.tm().txHandler()
            .filterUpdateCountersForBackupNode(tx, cctx.node(peer));

        if (F.isEmpty(cntrs))
            continue;

        MiniFuture miniFut = new MiniFuture(peer);

        try {
            // NOTE(review): miniFut is added only after a successful send — confirm a response
            // arriving before add() completes is handled by the compound future machinery.
            cctx.io().send(peer, new PartitionCountersNeighborcastRequest(cntrs, futId), SYSTEM_POOL);

            add(miniFut);
        }
        catch (IgniteCheckedException e) {
            if (!(e instanceof ClusterTopologyCheckedException))
                log.warning("Failed to send partition counters to remote node [node=" + peer + ']', e);
            else
                logNodeLeft(peer);

            // Complete the mini future so this compound future can still finish.
            miniFut.onDone();
        }
    }

    markInitialized();
}