/**
 * Creates a discovery message whose ID is derived from the creator node's ID.
 *
 * @param creatorNodeId Creator node ID.
 */
protected TcpDiscoveryAbstractMessage(UUID creatorNodeId) {
    id = IgniteUuid.fromUuid(creatorNodeId);
}
/**
 * Creates a timeout object that salvages transactions left by a failed node.
 *
 * @param node Failed node.
 * @param mvccCrd Mvcc coordinator at the time of node failure.
 */
private NodeFailureTimeoutObject(ClusterNode node, MvccCoordinator mvccCrd) {
    super(IgniteUuid.fromUuid(cctx.localNodeId()), TX_SALVAGE_TIMEOUT);

    this.mvccCrd = mvccCrd;
    this.node = node;
}
/**
 * Creates a file lock ID: a well-known constant for deletions, otherwise an ID
 * derived from the local node.
 *
 * @param del If the lock ID is required for file deletion.
 * @return Lock ID.
 */
private IgniteUuid createFileLockId(boolean del) {
    return del ? IgfsUtils.DELETE_LOCK_ID : IgniteUuid.fromUuid(locNode.id());
}
/**
 * Creates a ping future for the given node.
 *
 * @param rtState Runtime state.
 * @param node Node to ping.
 */
PingFuture(ZkRuntimeState rtState, ZookeeperClusterNode node) {
    this.node = node;
    this.rtState = rtState;

    id = IgniteUuid.fromUuid(node.id());

    // Deadline is the node's session timeout plus a one-second grace period.
    endTime = System.currentTimeMillis() + node.sessionTimeout() + 1000;
}
/**
 * Creates a new {@code DynamicCacheChangeFailureMessage}.
 *
 * @param locNode Local node.
 * @param exchId Exchange ID.
 * @param cause Cache start error.
 * @param cacheNames Names of the caches that failed to start (must be non-empty).
 */
public DynamicCacheChangeFailureMessage(
    ClusterNode locNode,
    GridDhtPartitionExchangeId exchId,
    IgniteCheckedException cause,
    Collection<String> cacheNames
) {
    assert exchId != null;
    assert cause != null;
    assert !F.isEmpty(cacheNames) : cacheNames;

    this.cacheNames = cacheNames;
    this.cause = cause;
    this.exchId = exchId;
    this.id = IgniteUuid.fromUuid(locNode.id());
}
/**
 * Executes a task resolved by its name.
 *
 * @param taskName Task name.
 * @param arg Optional execution argument.
 * @param execName Name of the custom executor.
 * @param <T> Task argument type.
 * @param <R> Task return value type.
 * @return Task future.
 * @throws IllegalStateException If the grid is shutting down.
 */
public <T, R> ComputeTaskInternalFuture<R> execute(String taskName, @Nullable T arg, @Nullable String execName) {
    assert taskName != null;

    lock.readLock();

    try {
        if (stopping)
            throw new IllegalStateException("Failed to execute task due to grid shutdown: " + taskName);

        IgniteUuid sesId = IgniteUuid.fromUuid(ctx.localNodeId());

        return startTask(taskName, null, null, sesId, arg, false, execName);
    }
    finally {
        lock.readUnlock();
    }
}
/**
 * Executes a task resolved by its class.
 *
 * @param taskCls Task class.
 * @param arg Optional execution argument.
 * @param execName Name of the custom executor.
 * @param <T> Task argument type.
 * @param <R> Task return value type.
 * @return Task future.
 * @throws IllegalStateException If the grid is shutting down.
 */
public <T, R> ComputeTaskInternalFuture<R> execute(Class<? extends ComputeTask<T, R>> taskCls, @Nullable T arg,
    @Nullable String execName) {
    assert taskCls != null;

    lock.readLock();

    try {
        if (stopping)
            throw new IllegalStateException("Failed to execute task due to grid shutdown: " + taskCls);

        IgniteUuid sesId = IgniteUuid.fromUuid(ctx.localNodeId());

        return startTask(null, taskCls, null, sesId, arg, false, execName);
    }
    finally {
        lock.readUnlock();
    }
}
/**
 * Creates a communication-error resolve future.
 *
 * @param impl Discovery implementation.
 * @param state Initial state (must not be {@code DONE}).
 * @param timeout Wait timeout before initiating communication error resolve;
 *      required to be positive only for {@code WAIT_TIMEOUT} state.
 */
private ZkCommunicationErrorProcessFuture(ZookeeperDiscoveryImpl impl, State state, long timeout) {
    assert state != State.DONE;

    this.impl = impl;
    this.log = impl.log();
    this.state = state;

    if (state != State.WAIT_TIMEOUT) {
        id = null;
        endTime = 0;
    }
    else {
        assert timeout > 0 : timeout;

        id = IgniteUuid.fromUuid(impl.localNode().id());
        endTime = System.currentTimeMillis() + timeout;
    }
}
/**
 * Executes an already-instantiated task.
 *
 * @param task Actual task.
 * @param arg Optional task argument.
 * @param sys If {@code true}, then system pool will be used.
 * @param execName Name of the custom executor.
 * @param <T> Task argument type.
 * @param <R> Task return value type.
 * @return Task future.
 * @throws IllegalStateException If the grid is shutting down.
 */
public <T, R> ComputeTaskInternalFuture<R> execute(ComputeTask<T, R> task, @Nullable T arg, boolean sys,
    @Nullable String execName) {
    lock.readLock();

    try {
        if (stopping)
            throw new IllegalStateException("Failed to execute task due to grid shutdown: " + task);

        IgniteUuid sesId = IgniteUuid.fromUuid(ctx.localNodeId());

        return startTask(null, null, task, sesId, arg, sys, execName);
    }
    finally {
        lock.readUnlock();
    }
}
// NOTE(review): body of an anonymous Callable; the trailing '} });' closes an
// enclosing anonymous class and method call that start outside this view.
@Override public Object call() throws Exception {
    UUID id = UUID.randomUUID();

    try {
        // Stress loop: repeatedly insert IgniteUuids derived from the same base
        // UUID until the shared 'finish' flag is raised, counting iterations.
        while (!finish.get()) {
            set.add(IgniteUuid.fromUuid(id));

            execCnt.increment();
        }

        return null;
    }
    catch (Throwable t) {
        t.printStackTrace();

        throw new Exception(t);
    }
    finally {
        X.println("Thread finished.");
    }
} });
// Loader ID derived from the local node ID — presumably identifies a
// deployment class loader; TODO confirm against the enclosing method.
IgniteUuid ldrId = IgniteUuid.fromUuid(ctx.localNodeId());
// NOTE(review): body of an anonymous event listener; the trailing '} };'
// closes an enclosing anonymous class declared outside this view.
@Override public void onEvent(Event evt) {
    int evtType = evt.type();

    // Listener is registered only for node failure/leave events.
    assert evtType == EVT_NODE_FAILED || evtType == EVT_NODE_LEFT;

    if (affMap.isEmpty())
        return; // Skip empty affinity map.

    final DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

    // Clean up affinity functions if such cache no more exists.
    final Collection<String> caches = ctx.cache().cacheNames();

    final Collection<AffinityAssignmentKey> rmv = new HashSet<>();

    // Collect entries whose cache is gone or whose topology version lags the
    // current one by more than 10 versions.
    for (AffinityAssignmentKey key : affMap.keySet()) {
        if (!caches.contains(key.cacheName) || key.topVer.topologyVersion() < discoEvt.topologyVersion() - 10)
            rmv.add(key);
    }

    if (!rmv.isEmpty()) {
        // Defer the actual removal via a timeout object instead of doing it
        // on the discovery thread.
        ctx.timeout().addTimeoutObject(
            new GridTimeoutObjectAdapter(
                IgniteUuid.fromUuid(ctx.localNodeId()),
                AFFINITY_MAP_CLEAN_UP_DELAY) {
                    @Override public void onTimeout() {
                        affMap.keySet().removeAll(rmv);
                    }
                });
    }
} };
// Per-node event response topic keyed by the local node ID.
Object resTopic = TOPIC_EVENT.topic(IgniteUuid.fromUuid(ctx.localNodeId()));
/**
 * Starts multi-update lock. Will wait for topology future is ready.
 *
 * @return Topology version.
 * @throws IgniteCheckedException If failed.
 */
public AffinityTopologyVersion beginMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> tup = multiTxHolder.get();

    // A non-null thread-local tuple means this thread already holds a
    // multi-update lock; nesting is explicitly unsupported.
    if (tup != null)
        throw new IgniteCheckedException("Nested multi-update locks are not supported");

    GridDhtPartitionTopology top = ctx.group().topology();

    top.readLock();

    GridDhtTopologyFuture topFut;

    AffinityTopologyVersion topVer;

    try {
        // While we are holding read lock, register lock future for partition release future.
        // (Registration must happen under the read lock so the topology cannot
        // advance between reading the version and publishing the future.)
        IgniteUuid lockId = IgniteUuid.fromUuid(ctx.localNodeId());

        topVer = top.readyTopologyVersion();

        MultiUpdateFuture fut = new MultiUpdateFuture(topVer);

        MultiUpdateFuture old = multiTxFuts.putIfAbsent(lockId, fut);

        // Only one future per local lock ID may be in flight at a time.
        assert old == null;

        topFut = top.topologyVersionFuture();

        // Remember the lock ID and topology future for the matching end-multi-update call.
        multiTxHolder.set(F.t(lockId, topFut));
    }
    finally {
        top.readUnlock();
    }

    // Block until the topology future completes before returning the version.
    topFut.get();

    return topVer;
}
throw new IgniteCheckedException("Node can not be null [mappedJob=" + mappedJob + ", ses=" + ses + ']');

// NOTE(review): fragment — the throw above is presumably guarded by a null
// check outside this view. Job ID is derived from the local node ID.
IgniteUuid jobId = IgniteUuid.fromUuid(ctx.localNodeId());
// NOTE(review): fragment of an argument list — a local-node-derived deployment
// ID followed by deployment metadata attributes; enclosing call is outside this view.
IgniteUuid.fromUuid(ctx.localNodeId()), meta.userVersion(), meta.deploymentMode(),
/**
 * Creates the deployment manager. When peer class loading is disabled and the
 * configured {@code DeploymentSpi} is annotated with
 * {@code IgnoreIfPeerClassLoadingDisabled}, a local deployment is created;
 * otherwise {@code locDep} is left {@code null}.
 *
 * @param ctx Grid kernal context.
 */
public GridDeploymentManager(GridKernalContext ctx) {
    super(ctx, ctx.config().getDeploymentSpi());

    if (ctx.config().isPeerClassLoadingEnabled()) {
        locDep = null;

        return;
    }

    DeploymentSpi spi = ctx.config().getDeploymentSpi();

    IgnoreIfPeerClassLoadingDisabled ann = U.getAnnotation(spi.getClass(), IgnoreIfPeerClassLoadingDisabled.class);

    if (ann == null) {
        locDep = null;

        return;
    }

    // Fall back to the grid class loader when no explicit loader is configured.
    ClassLoader clsLdr = ctx.config().getClassLoader() != null ? ctx.config().getClassLoader() : U.gridClassLoader();

    locDep = new LocalDeployment(
        ctx.config().getDeploymentMode(),
        clsLdr,
        IgniteUuid.fromUuid(ctx.localNodeId()),
        ctx.userVersion(U.gridClassLoader()),
        String.class.getName());
}
/**
 * Checks that both {@code UUID} and {@code IgniteUuid} cache values round-trip
 * through the REST {@code CACHE_GET} command.
 *
 * @throws Exception If failed.
 */
@Test
public void testUUID() throws Exception {
    UUID uuid = UUID.randomUUID();

    jcache().put("uuidKey", uuid);

    String res = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_GET, "key", "uuidKey");

    info("Get command result: " + res);

    assertCacheOperation(res, uuid.toString());

    IgniteUuid gridUuid = IgniteUuid.fromUuid(uuid);

    jcache().put("igniteUuidKey", gridUuid);

    res = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_GET, "key", "igniteUuidKey");

    info("Get command result: " + res);

    assertCacheOperation(res, gridUuid.toString());
}
// Loader ID derived from the local node ID — presumably identifies a
// deployment class loader; TODO confirm against the enclosing method.
IgniteUuid ldrId = IgniteUuid.fromUuid(ctx.localNodeId());
// Per-node data streamer topic keyed by the local node ID.
topic = TOPIC_DATASTREAM.topic(IgniteUuid.fromUuid(ctx.localNodeId()));