/**
 * Callback invoked when the local node is disconnected from the cluster.
 * Snapshots the currently registered cache groups and caches (together with the
 * cluster state at disconnect time) so they can be reconciled on reconnect,
 * then clears the live registries and any pending client reconnect requests.
 */
public void onDisconnected() {
    // Preserve pre-disconnect registrations; copies are taken so later clear() calls do not affect the snapshot.
    cachesOnDisconnect = new CachesOnDisconnect(
        ctx.state().clusterState(),
        new HashMap<>(registeredCacheGrps),
        new HashMap<>(registeredCaches));

    registeredCacheGrps.clear();
    registeredCaches.clear();
    registeredTemplates.clear();

    clientReconnectReqs = null;
}
/**
 * @param ctx Kernal context used to read the discovery cluster state.
 * @return Instance of current baseline topology, or {@code null} if it does not exist.
 */
public static BaselineTopology getBaselineTopology(@NotNull GridKernalContext ctx) {
    return ctx.state().clusterState().baselineTopology();
}
/**
 * Whether cluster is active at this moment or not.
 * Also returns {@code true} if cluster is being activated (i.e. activation in progress counts as active).
 */
private boolean isActive() {
    return ctx.state().clusterState().active();
}
/** {@inheritDoc} */
@Override public long getRowCount() {
    BaselineTopology blt = ctx.state().clusterState().baselineTopology();

    // Without a baseline topology the view exposes no rows.
    if (blt == null)
        return 0;

    return blt.consistentIds().size();
}
}
/**
 * Returns current baseline topology id, or {@code -1} if there's no baseline topology found.
 */
private int getBaselineTopologyId() {
    BaselineTopology blt = ctx.state().clusterState().baselineTopology();

    if (blt == null)
        return -1;

    return blt.id();
}
/** {@inheritDoc} */
@Override public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
    List<Row> res = new ArrayList<>();

    BaselineTopology blt = ctx.state().clusterState().baselineTopology();

    // No baseline topology configured: the view is empty.
    if (blt == null)
        return res.iterator();

    // Consistent ids of currently alive server nodes, used to flag each baseline entry as online/offline.
    Set<Object> aliveIds = new HashSet<>(F.nodeConsistentIds(ctx.discovery().aliveServerNodes()));

    for (Object consId : blt.consistentIds())
        res.add(createRow(ses, consId, aliveIds.contains(consId)));

    return res.iterator();
}
/** {@inheritDoc} */
@Nullable @Override public Collection<BaselineNode> currentBaselineTopology() {
    guard();

    try {
        BaselineTopology blt = ctx.state().clusterState().baselineTopology();

        if (blt == null)
            return null;

        return blt.currentBaseline();
    }
    finally {
        unguard();
    }
}
/**
 * Collects consistent ids of alive server nodes that are present in the current baseline
 * topology but absent from the proposed one (i.e. online nodes requested for removal).
 *
 * @param newBlt Proposed new baseline topology.
 * @return Consistent ids of online nodes requested for removal (possibly empty),
 *      or {@code null} if no baseline topology is currently set.
 */
@Nullable private Collection<Object> onlineBaselineNodesRequestedForRemoval(
    Collection<? extends BaselineNode> newBlt) {
    BaselineTopology blt = ctx.state().clusterState().baselineTopology();

    if (blt == null)
        return null;

    Collection<Object> aliveConsIds = getConsistentIds(ctx.discovery().aliveServerNodes());
    Collection<Object> newConsIds = getConsistentIds(newBlt);

    ArrayList<Object> res = new ArrayList<>();

    for (Object consId : blt.consistentIds()) {
        // A node is flagged only if it is currently online AND missing from the new baseline.
        if (aliveConsIds.contains(consId) && !newConsIds.contains(consId))
            res.add(consId);
    }

    return res;
}
/** * Merging config or resaving it if it needed. * * @param patchesToApply Patches which need to apply. * @param cachesToSave Caches which need to resave. * @param hasSchemaPatchConflict {@code true} if we have conflict during making patch. */ private void updateRegisteredCachesIfNeeded(Map<DynamicCacheDescriptor, QuerySchemaPatch> patchesToApply, Collection<DynamicCacheDescriptor> cachesToSave, boolean hasSchemaPatchConflict) { //Skip merge of config if least one conflict was found. if (!hasSchemaPatchConflict && isMergeConfigSupports(ctx.discovery().localNode())) { boolean isClusterActive = ctx.state().clusterState().active(); //Merge of config for cluster only for inactive grid. if (!isClusterActive && !patchesToApply.isEmpty()) { for (Map.Entry<DynamicCacheDescriptor, QuerySchemaPatch> entry : patchesToApply.entrySet()) { if (entry.getKey().applySchemaPatch(entry.getValue())) saveCacheConfiguration(entry.getKey()); } for (DynamicCacheDescriptor descriptor : cachesToSave) { saveCacheConfiguration(descriptor); } } else if (patchesToApply.isEmpty()) { for (DynamicCacheDescriptor descriptor : cachesToSave) { saveCacheConfiguration(descriptor); } } } }
/**
 * Executes validation checks of cluster state and BaselineTopology before changing BaselineTopology to new one.
 *
 * @param baselineTop Proposed baseline topology; {@code null} skips node-level validation.
 */
private void validateBeforeBaselineChange(Collection<? extends BaselineNode> baselineTop) {
    verifyBaselineTopologySupport(ctx.discovery().discoCache());

    if (!ctx.state().clusterState().active())
        throw new IgniteException("Changing BaselineTopology on inactive cluster is not allowed.");

    if (baselineTop == null)
        return;

    if (baselineTop.isEmpty())
        throw new IgniteException("BaselineTopology must contain at least one node.");

    Collection<Object> onlineNodes = onlineBaselineNodesRequestedForRemoval(baselineTop);

    if (onlineNodes != null && !onlineNodes.isEmpty())
        throw new IgniteException("Removing online nodes from BaselineTopology is not supported: " + onlineNodes);
}
/** {@inheritDoc} */
@Override public boolean isNodeInBaseline() {
    ClusterNode locNode = localNode();

    // Client and daemon nodes never participate in the baseline topology.
    if (locNode.isClient() || locNode.isDaemon())
        return false;

    DiscoveryDataClusterState state = ctx.state().clusterState();

    return state.hasBaselineTopology() && CU.baselineNode(locNode, state);
}
// NOTE(review): builds a disco cache over a local-node-only topology on the
// non-versioned (NONE) topology version — presumably for a detached/reconnect
// scenario; confirm against the enclosing method, which is not visible here.
createDiscoCache(
    AffinityTopologyVersion.NONE,
    ctx.state().clusterState(),
    node,
    locNodeOnlyTop),
/** {@inheritDoc} */
@Override public Map<Integer, CacheMetrics> cacheMetrics() {
    try {
        // Metrics collection can be disabled via configuration.
        if (disableCacheMetricsUpdate)
            return Collections.emptyMap();

        // Caches should not be accessed while a cluster state transition is in progress.
        if (ctx.state().clusterState().transition())
            return Collections.emptyMap();

        Collection<GridCacheAdapter<?, ?>> caches = ctx.cache().internalCaches();

        if (!F.isEmpty(caches)) {
            Map<Integer, CacheMetrics> metrics = U.newHashMap(caches.size());

            for (GridCacheAdapter<?, ?> cache : caches) {
                // Only report started caches with statistics enabled that have completed
                // at least one topology/affinity assignment (topology version > 0).
                if (cache.context().statisticsEnabled() &&
                    cache.context().started() &&
                    cache.context().affinity().affinityTopologyVersion().topologyVersion() > 0)
                    metrics.put(cache.context().cacheId(), cache.localMetrics());
            }

            return metrics;
        }
    }
    catch (Exception e) {
        // Best-effort: metrics failures are logged, never propagated to the caller.
        U.warn(log, "Failed to compute cache metrics", e);
    }

    return Collections.emptyMap();
}
};
/**
 * Re-registers a reconnecting client node's caches with discovery.
 *
 * @param clientData Discovery data describing the client's caches.
 * @param clientNodeId Client node ID.
 */
private void processClientReconnectData(CacheClientReconnectDiscoveryData clientData, UUID clientNodeId) {
    DiscoveryDataClusterState state = ctx.state().clusterState();

    // Ignore reconnect data while the cluster is inactive or a state transition is in progress.
    if (!state.active() || state.transition())
        return;

    for (CacheClientReconnectDiscoveryData.CacheInfo cacheInfo : clientData.clientCaches().values()) {
        String cacheName = cacheInfo.config().getName();

        if (surviveReconnect(cacheName)) {
            ctx.discovery().addClientNode(cacheName, clientNodeId, false);

            continue;
        }

        // Otherwise re-register only if the cache still exists with the same deployment id.
        DynamicCacheDescriptor desc = registeredCaches.get(cacheName);

        if (desc != null && desc.deploymentId().equals(cacheInfo.deploymentId()))
            ctx.discovery().addClientNode(cacheName, clientNodeId, cacheInfo.nearCache());
    }
}
// Accumulator for caches that survive the reconnect, presized to the candidate count.
List<GridCacheAdapter> reconnected = new ArrayList<>(caches.size());

// NOTE(review): snapshot of the cluster state taken once for the reconnect pass below
// (not visible in this chunk) — confirm it is not expected to be re-read per cache.
DiscoveryDataClusterState state = ctx.state().clusterState();
/** * Logs Tx state to WAL if needed. * * @param tx Transaction. * @return WALPointer or {@code null} if nothing was logged. */ @Nullable WALPointer logTxRecord(IgniteTxAdapter tx) { BaselineTopology baselineTop; // Log tx state change to WAL. if (cctx.wal() == null || (!logTxRecords && !tx.txState().mvccEnabled()) || (baselineTop = cctx.kernalContext().state().clusterState().baselineTopology()) == null || !baselineTop.consistentIds().contains(cctx.localNode().consistentId())) return null; Map<Short, Collection<Short>> nodes = tx.consistentIdMapper.mapToCompactIds(tx.topVer, tx.txNodes, baselineTop); TxRecord record; if (tx.txState().mvccEnabled()) record = new MvccTxRecord(tx.state(), tx.nearXidVersion(), tx.writeVersion(), nodes, tx.mvccSnapshot()); else record = new TxRecord(tx.state(), tx.nearXidVersion(), tx.writeVersion(), nodes); try { return cctx.wal().log(record); } catch (IgniteCheckedException e) { U.error(log, "Failed to log TxRecord: " + record, e); throw new IgniteException("Failed to log TxRecord: " + record, e); } }
/**
 * Log exchange event.
 *
 * @param evt Discovery event.
 */
private void logExchange(DiscoveryEvent evt) {
    // Requires an active cluster and an available WAL.
    if (!cctx.kernalContext().state().publicApiActiveState(false) || cctx.wal() == null)
        return;

    // Exchange records are only supported by WAL serializer versions above 1.
    if (cctx.wal().serializerVersion() <= 1)
        return;

    try {
        ExchangeRecord.Type type = null;

        if (evt.type() == EVT_NODE_JOINED)
            type = ExchangeRecord.Type.JOIN;
        else if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED)
            type = ExchangeRecord.Type.LEFT;

        BaselineTopology blt = cctx.kernalContext().state().clusterState().baselineTopology();

        // Only join/leave events of nodes known to the baseline are logged.
        if (type == null || blt == null)
            return;

        Short constId = blt.consistentIdMapping().get(evt.eventNode().consistentId());

        if (constId != null)
            cctx.wal().log(new ExchangeRecord(constId, type));
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Fail during log exchange record.", e);
    }
}
// Cluster started in inactive state: tell the user how to activate it.
if (!ctx.state().clusterState().active()) {
    U.quietAndInfo(log, ">>> Ignite cluster is not active (limited functionality available). " +
        "Use control.(sh|bat) script or IgniteCluster interface to activate.");
// Snapshot of the current discovery cluster state for the logic below (not visible in this chunk).
DiscoveryDataClusterState state = ctx.state().clusterState();
// NOTE(review): cluster state read through `crd` — presumably the coordinator node's context; confirm.
DiscoveryDataClusterState clusterState = crd.context().state().clusterState();