/** {@inheritDoc} */
@SuppressWarnings("NonPrivateFieldAccessedInSynchronizedContext")
@Override protected void onNodeLeft(UUID nodeId) {
    boolean callOnPage;

    synchronized (this) {
        callOnPage = !loc && subgrid.contains(nodeId);
    }

    if (callOnPage)
        onPage(nodeId, Collections.emptyList(),
            new ClusterTopologyCheckedException("Remote node has left topology: " + nodeId), true);
}
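// Generic, self-contained sketch (hypothetical names, not taken from the source above) of the
// idiom used in onNodeLeft(): the decision flag is computed while holding the monitor, but the
// callback runs after the monitor is released, so listener code never executes under this lock.
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

class LockThenNotifySketch {
    /** Guarded by {@code this}. */
    private final Set<UUID> nodes = new HashSet<>();

    /** Removes the node under lock and, if it was present, notifies outside of the lock. */
    void onNodeLeft(UUID nodeId, Runnable callback) {
        boolean notify;

        synchronized (this) {
            notify = nodes.remove(nodeId);
        }

        if (notify)
            callback.run();
    }
}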
log.debug("Topology changed while reassigning (will retry): " + e.getMessage());
/**
 * Creates new topology exception for cases when primary node leaves grid during mapping.
 *
 * @param nodeId Node ID.
 * @return Topology exception with user-friendly message.
 */
private ClusterTopologyCheckedException newTopologyException(UUID nodeId) {
    ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to enlist keys " +
        "(primary node left grid, retry transaction if possible) [node=" + nodeId + ']');

    topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));

    return topEx;
}
@Override public IgniteException apply(IgniteCheckedException e) {
    ClusterTopologyException topEx = new ClusterTopologyException(e.getMessage(), e);

    ClusterTopologyCheckedException checked = (ClusterTopologyCheckedException)e;

    if (checked.retryReadyFuture() != null)
        topEx.retryReadyFuture(new IgniteFutureImpl<>(checked.retryReadyFuture()));

    return topEx;
} });
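// Illustrative caller-side sketch (assumed usage, not part of the converter above): once the
// retry-ready future has been attached to the public ClusterTopologyException, application code
// can wait on it and retry instead of busy-looping. The cache, key and value names are made up.
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cluster.ClusterTopologyException;

class TopologyRetryUsageSketch {
    /** Retries a put until it succeeds or the retry budget is exhausted. */
    static void putWithRetry(IgniteCache<Integer, String> cache, int key, String val, int maxRetries) {
        for (int i = 0; i < maxRetries; i++) {
            try {
                cache.put(key, val);

                return;
            }
            catch (ClusterTopologyException e) {
                if (i == maxRetries - 1)
                    throw e;

                // Wait for the next affinity-ready topology if a retry future was attached.
                if (e.retryReadyFuture() != null)
                    e.retryReadyFuture().get();
            }
        }
    }
}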
/**
 * @param c Closure to run.
 * @return Closure return value.
 * @throws IgniteCheckedException If failed.
 */
private static <T> T retryTopologySafe(IgniteOutClosureX<T> c) throws IgniteCheckedException {
    for (int i = 0; i < GridCacheAdapter.MAX_RETRIES; i++) {
        try {
            return c.applyx();
        }
        catch (IgniteCheckedException e) {
            if (i == GridCacheAdapter.MAX_RETRIES - 1)
                throw e;

            ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);

            if (topErr == null || (topErr instanceof ClusterTopologyServerNotFoundException))
                throw e;

            IgniteInternalFuture<?> fut = topErr.retryReadyFuture();

            if (fut != null)
                fut.get();
        }
    }

    assert false;

    return null;
}
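// Hypothetical usage sketch of the retryTopologySafe helper above (not taken from the source):
// the topology-sensitive operation is wrapped in an IgniteOutClosureX so it can simply be
// re-invoked once the retry-ready future completes. The returned value is a made-up placeholder.
Integer res = retryTopologySafe(new IgniteOutClosureX<Integer>() {
    @Override public Integer applyx() throws IgniteCheckedException {
        return 42; // Placeholder for some internal, topology-sensitive cache call.
    }
});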
@Override public void run() {
    buf0.onNodeLeft();

    if (futs != null) {
        Throwable ex = new ClusterTopologyCheckedException(
            "Failed to wait for request completion (node has left): " + nodeId);

        for (int i = 0; i < futs.size(); i++)
            futs.get(i).onDone(ex);
    }
}
}, ctx.discovery().topologyVersion(), false);
/**
 * Creates new topology exception for cases when primary node leaves grid during mapping.
 *
 * @param nested Optional nested exception.
 * @param nodeId Node ID.
 * @return Topology exception with user-friendly message.
 */
private ClusterTopologyCheckedException newTopologyException(@Nullable Throwable nested, UUID nodeId) {
    ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to acquire lock for keys " +
        "(primary node left grid, retry transaction if possible) [keys=" + keys +
        ", node=" + nodeId + ']', nested);

    topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));

    return topEx;
}
if (topErr.retryReadyFuture() != null)
    topErr.retryReadyFuture().get();
else
    U.sleep(1);
if (log.isDebugEnabled())
    log.debug("Supplier has left [" + demandRoutineInfo(topicId, nodeId, supplyMsg) +
        ", errMsg=" + e.getMessage() + ']');
/**
 * @param nodeId Failed MVCC coordinator node ID.
 */
private void onCoordinatorFailed(UUID nodeId) {
    // 1. Notify all listeners waiting for a snapshot.
    Map<Long, MvccSnapshotResponseListener> map = snapLsnrs.remove(nodeId);

    if (map != null) {
        ClusterTopologyCheckedException ex = new ClusterTopologyCheckedException("Failed to request mvcc " +
            "version, coordinator left: " + nodeId);

        MvccSnapshotResponseListener lsnr;

        for (Long id : map.keySet()) {
            if ((lsnr = map.remove(id)) != null)
                lsnr.onError(ex);
        }
    }

    // 2. Notify acknowledge futures.
    for (WaitAckFuture fut : ackFuts.values())
        fut.onNodeLeft(nodeId);
}
e.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
log.debug("Topology changed while reassigning (will retry): " + e.getMessage());
/**
 * Checks whether the given key may be remapped one more time.
 *
 * @param key Key.
 * @param node Mapped node.
 * @param missedNodesToKeysMapping Full node mapping.
 * @return {@code True} if remapping is allowed, {@code false} if the remap limit has been exceeded.
 */
protected boolean checkRetryPermits(
    KeyCacheObject key,
    ClusterNode node,
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> missedNodesToKeysMapping
) {
    LinkedHashMap<KeyCacheObject, Boolean> keys = missedNodesToKeysMapping.get(node);

    if (keys != null && keys.containsKey(key)) {
        if (REMAP_CNT_UPD.incrementAndGet(this) > MAX_REMAP_CNT) {
            onDone(new ClusterTopologyCheckedException("Failed to remap key to a new node after " +
                MAX_REMAP_CNT + " attempts (key got remapped to the same node) [key=" + key +
                ", node=" + U.toShortString(node) + ", mappings=" + missedNodesToKeysMapping + ']'));

            return false;
        }
    }

    return true;
}
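// Self-contained sketch (assumed names and values, not taken from the source) of the remap-counter
// idiom used by checkRetryPermits() above: a static AtomicIntegerFieldUpdater bumps a volatile
// field on the future instance, so concurrent remap attempts share one retry budget without locks.
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

class RemapGuardSketch {
    /** Maximum remap attempts (assumed value for illustration). */
    private static final int MAX_REMAP_CNT = 3;

    /** Updater for {@link #remapCnt}. */
    private static final AtomicIntegerFieldUpdater<RemapGuardSketch> REMAP_CNT_UPD =
        AtomicIntegerFieldUpdater.newUpdater(RemapGuardSketch.class, "remapCnt");

    /** Number of remap attempts performed so far. */
    private volatile int remapCnt;

    /** @return {@code True} if one more remap attempt is allowed. */
    boolean tryRemap() {
        return REMAP_CNT_UPD.incrementAndGet(this) <= MAX_REMAP_CNT;
    }
}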
/** {@inheritDoc} */
@Override public boolean onNodeLeft(UUID nodeId) {
    if (batches.containsKey(nodeId)) {
        if (log.isDebugEnabled())
            log.debug("Found unacknowledged batch for left node [nodeId=" + nodeId + ", fut=" + this + ']');

        ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to enlist keys " +
            "(primary node left grid, retry transaction if possible) [node=" + nodeId + ']');

        topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));

        processFailure(topEx, null);

        batches.remove(nodeId);

        if (batches.isEmpty()) // Wait for all pending requests.
            onDone();
    }
    else if (log.isDebugEnabled())
        log.debug("Future does not have mapping for left node (ignoring) [nodeId=" + nodeId +
            ", fut=" + this + ']');

    return false;
}
e.retryReadyFuture(cctx.nextAffinityReadyFuture(topVer));
/**
 * @param nodeId Destination node ID.
 * @param msg Message.
 * @param orderedTopic Topic for ordered notifications. If {@code null}, non-ordered message will be sent.
 * @param ackC Ack closure.
 * @throws IgniteCheckedException In case of error.
 */
private void sendWithRetries(UUID nodeId, GridContinuousMessage msg, @Nullable Object orderedTopic,
    IgniteInClosure<IgniteException> ackC) throws IgniteCheckedException {
    assert nodeId != null;
    assert msg != null;

    ClusterNode node = ctx.discovery().node(nodeId);

    if (node != null)
        sendWithRetries(node, msg, orderedTopic, ackC);
    else
        throw new ClusterTopologyCheckedException("Node for provided ID doesn't exist (did it leave the grid?): " +
            nodeId);
}
/** {@inheritDoc} */
@Override public boolean onNodeLeft(UUID nodeId) {
    boolean found = false;

    for (IgniteInternalFuture<?> fut : futures()) {
        if (fut instanceof MiniFuture) {
            MiniFuture f = (MiniFuture)fut;

            if (f.primary().id().equals(nodeId)) {
                ClusterTopologyCheckedException e = new ClusterTopologyCheckedException("Remote node left grid: " +
                    nodeId);

                e.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

                f.onNodeLeft(e);

                found = true;
            }
        }
    }

    return found;
}
ClusterTopologyCheckedException topEx = X.cause(e, ClusterTopologyCheckedException.class);

retryFut = topEx.retryReadyFuture();