ClusterTopologyException topEx = (ClusterTopologyException)e.getCause(); topEx.retryReadyFuture().get(); IgniteFuture<?> fut = e.retryReadyFuture();
// Converts an internal checked topology exception into the public
// ClusterTopologyException, carrying the retry-ready future across when present.
@Override public IgniteException apply(IgniteCheckedException e) {
    ClusterTopologyException topEx = new ClusterTopologyException(e.getMessage(), e);

    // NOTE(review): unconditional cast — assumes this converter is only ever invoked
    // with ClusterTopologyCheckedException instances; confirm at the registration site.
    ClusterTopologyCheckedException checked = (ClusterTopologyCheckedException)e;

    // Wrap the internal future so public-API callers can wait for a safe retry point.
    if (checked.retryReadyFuture() != null)
        topEx.retryReadyFuture(new IgniteFutureImpl<>(checked.retryReadyFuture()));

    return topEx;
} });
jr.getJobContext().getJobId(), null, null, null, null, null, null, false, null); fakeRes.setFakeException(new ClusterTopologyException("Node has left grid: " + nodeId));
/**
 * Creates a fallback iterator over the given partition, resolving the initial
 * set of nodes able to serve the scan query at the current affinity topology.
 *
 * @param part Partition.
 * @param qry Query.
 * @param qryMgr Query manager.
 * @param cctx Cache context.
 */
private ScanQueryFallbackClosableIterator(int part, GridCacheQueryAdapter qry,
    GridCacheQueryManager qryMgr, GridCacheContext cctx) {
    this.qry = qry;
    this.qryMgr = qryMgr;
    this.cctx = cctx;
    this.part = part;

    // Resolve candidate nodes for the partition against the ready affinity version.
    nodes = fallbacks(cctx.shared().exchange().readyAffinityVersion());

    // No owners left for this partition — the query cannot run anywhere.
    if (F.isEmpty(nodes))
        throw new ClusterTopologyException("Failed to execute the query " +
            "(all affinity nodes left the grid) [cache=" + cctx.name() +
            ", qry=" + qry +
            ", startTopVer=" + cctx.versions().last().topologyVersion() +
            ", curTopVer=" + qryMgr.queryTopologyVersion().topologyVersion() + ']');

    init();
}
/**
 * Handles a cache operation failure: reinitializes the cache reference on
 * {@link IllegalStateException}, or waits on the reconnect/retry future when the
 * failure was caused by a client disconnect or a topology change.
 *
 * @param e Exception thrown by the cache operation.
 * @throws IgniteException If waiting for the retry/reconnect future fails or times out.
 */
void handleCacheOperationException(Exception e){
    IgniteFuture<?> retryFut = null;

    if (e instanceof IllegalStateException) {
        // Cache proxy became invalid (e.g. cache restarted) — re-acquire it and return.
        initCache();

        return;
    }
    else if (X.hasCause(e, IgniteClientDisconnectedException.class)) {
        IgniteClientDisconnectedException cause = X.cause(e, IgniteClientDisconnectedException.class);

        assert cause != null : e;

        retryFut = cause.reconnectFuture();
    }
    else if (X.hasCause(e, ClusterTopologyException.class)) {
        ClusterTopologyException cause = X.cause(e, ClusterTopologyException.class);

        assert cause != null : e;

        retryFut = cause.retryReadyFuture();
    }

    if (retryFut != null) {
        try {
            retryFut.get(retriesTimeout);
        }
        catch (IgniteException retryErr) {
            // Fix: propagate the failed wait as the cause instead of flattening it into
            // the message, so the original stack trace is preserved for diagnostics.
            throw new IgniteException("Failed to wait for retry: " + retryErr, retryErr);
        }
    }
}
// Translates the internal ClusterTopologyCheckedException into its public
// counterpart, propagating the retry-ready future when one is attached.
@Override public IgniteException apply(IgniteCheckedException e) {
    ClusterTopologyCheckedException checked = (ClusterTopologyCheckedException)e;

    ClusterTopologyException topEx = new ClusterTopologyException(e.getMessage(), e);

    // Expose the internal retry future through the public wrapper type.
    if (checked.retryReadyFuture() != null)
        topEx.retryReadyFuture(new IgniteFutureImpl<>(checked.retryReadyFuture()));

    return topEx;
} });
resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
/**
 * Performs an operation with transaction with retries.
 *
 * Runs the closure in a PESSIMISTIC / REPEATABLE_READ transaction; on a
 * topology-change failure it waits for the retry-ready future and tries again,
 * up to MAX_CACHE_TX_RETRIES attempts. Any other failure is rethrown immediately.
 *
 * @param cache Cache to do the transaction on.
 * @param clo Closure.
 * @return Result of closure execution.
 * @throws IgniteCheckedException If failed.
 */
public static <T> T doInTransactionWithRetries(IgniteInternalCache cache, IgniteOutClosureX<T> clo)
    throws IgniteCheckedException {
    assert cache != null;

    int attempts = 0;

    while (attempts < MAX_CACHE_TX_RETRIES) {
        // try-with-resources rolls the transaction back if commit was not reached.
        try (Transaction tx = cache.txStart(PESSIMISTIC, REPEATABLE_READ)) {
            T res = clo.applyx();

            tx.commit();

            return res;
        }
        catch (IgniteException | IgniteCheckedException e) {
            ClusterTopologyException cte = X.cause(e, ClusterTopologyException.class);

            // NOTE(review): assumes retryReadyFuture() is non-null and an IgniteFutureImpl
            // whenever a ClusterTopologyException is in the cause chain — confirm against
            // the exception's producer.
            if (cte != null)
                // Block until the topology stabilizes before the next attempt.
                ((IgniteFutureImpl)cte.retryReadyFuture()).internalFuture().getUninterruptibly();
            else
                throw U.cast(e);
        }

        attempts++;
    }

    throw new IgniteCheckedException("Failed to perform operation since max number of attempts " +
        "exceeded. [maxAttempts=" + MAX_CACHE_TX_RETRIES + ']');
}
case "BinaryObjectException": throw new BinaryObjectException(arg); case "ClusterGroupEmptyException": throw new ClusterGroupEmptyException(arg); case "ClusterTopologyException": throw new ClusterTopologyException(arg); case "ComputeExecutionRejectedException": throw new ComputeExecutionRejectedException(arg); case "ComputeJobFailoverException": throw new ComputeJobFailoverException(arg);
ClusterTopologyException topEx = (ClusterTopologyException)e.getCause(); topEx.retryReadyFuture().get(); IgniteFuture<?> fut = e.retryReadyFuture();
throw new ClusterTopologyException("Topology was changed. Please retry on stable topology.");
/**
 * Gets retry or reconnect future if passed in {@code 'Exception'} has corresponding class in {@code 'cause'}
 * hierarchy.
 *
 * @param e {@code Exception}.
 * @return Internal future.
 * @throws Exception If unable to find retry or reconnect future.
 */
private IgniteFuture<?> getRetryFuture(Exception e) throws Exception {
    // Client disconnect takes precedence: wait for the reconnect future.
    IgniteClientDisconnectedException disconnected = X.cause(e, IgniteClientDisconnectedException.class);

    if (disconnected != null)
        return disconnected.reconnectFuture();

    // Topology change: wait for the retry-ready future.
    ClusterTopologyException topChanged = X.cause(e, ClusterTopologyException.class);

    if (topChanged != null)
        return topChanged.retryReadyFuture();

    // Neither cause is present — propagate the original failure.
    throw e;
}
res.getJobContext().getJobId(), null, null, null, null, null, null, false, null); fakeRes.setFakeException(new ClusterTopologyException("Failed to send job due to node failure: " + node)); ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); fakeErr = new ClusterTopologyException("Failed to send job due to node failure: " + node, e);
((IgniteFutureImpl)cte.retryReadyFuture()).internalFuture().getUninterruptibly(); else throw U.cast(e);
jr.getJobContext().getJobId(), null, null, null, null, null, null, false, null); fakeRes.setFakeException(new ClusterTopologyException("Node has left grid: " + nodeId));
/**
 * Handles a cache operation failure: reinitializes the cache reference on
 * {@link IllegalStateException}, or waits on the reconnect/retry future when the
 * failure was caused by a client disconnect or a topology change.
 *
 * @param e Exception thrown by the cache operation.
 * @throws IgniteException If waiting for the retry/reconnect future fails or times out.
 */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
void handleCacheOperationException(Exception e){
    IgniteFuture<?> retryFut = null;

    if (e instanceof IllegalStateException) {
        // Cache proxy became invalid (e.g. cache restarted) — re-acquire it and return.
        initCache();

        return;
    }
    else if (X.hasCause(e, IgniteClientDisconnectedException.class)) {
        IgniteClientDisconnectedException cause = X.cause(e, IgniteClientDisconnectedException.class);

        assert cause != null : e;

        retryFut = cause.reconnectFuture();
    }
    else if (X.hasCause(e, ClusterTopologyException.class)) {
        ClusterTopologyException cause = X.cause(e, ClusterTopologyException.class);

        assert cause != null : e;

        retryFut = cause.retryReadyFuture();
    }

    if (retryFut != null) {
        try {
            retryFut.get(retriesTimeout);
        }
        catch (IgniteException retryErr) {
            // Fix: propagate the failed wait as the cause instead of flattening it into
            // the message, so the original stack trace is preserved for diagnostics.
            throw new IgniteException("Failed to wait for retry: " + retryErr, retryErr);
        }
    }
}
/**
 * Creates a partition scan iterator with node fallback. Resolves the initial
 * node set for the partition and fails fast when no affinity nodes remain.
 *
 * @param part Partition.
 * @param qry Query.
 * @param qryMgr Query manager.
 * @param cctx Cache context.
 */
private ScanQueryFallbackClosableIterator(int part, GridCacheQueryAdapter qry,
    GridCacheQueryManager qryMgr, GridCacheContext cctx) {
    this.part = part;
    this.qry = qry;
    this.qryMgr = qryMgr;
    this.cctx = cctx;

    // Candidate nodes for the partition at the current ready affinity version.
    nodes = fallbacks(cctx.shared().exchange().readyAffinityVersion());

    if (F.isEmpty(nodes))
        throw new ClusterTopologyException("Failed to execute the query " +
            "(all affinity nodes left the grid) [cache=" + cctx.name() +
            ", qry=" + qry + ", startTopVer=" +
            cctx.versions().last().topologyVersion() +
            ", curTopVer=" + qryMgr.queryTopologyVersion().topologyVersion() + ']');

    init();
}
resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
throw new ClusterTopologyException("Can not reserve partition. Please retry on stable topology.");
throw new ClusterTopologyException("Topology was changed. Please retry on stable topology.");