/**
 * Formats Server Address specification so that TensorFlow accepts it.
 *
 * @param ignite Ignite instance.
 * @return Formatted server address specification in the {@code host:port} form.
 * @throws IllegalStateException If no host name can be resolved for the node.
 */
public String format(Ignite ignite) {
    // Host names of the node this server is assigned to.
    Collection<String> names = ignite.cluster().forNodeId(nodeId).hostNames();

    // Guard against a node that left the topology or reports no host names — otherwise
    // iterator().next() would fail with an uninformative NoSuchElementException.
    if (names.isEmpty())
        throw new IllegalStateException("Failed to resolve host name for node [nodeId=" + nodeId + "]");

    return names.iterator().next() + ":" + port;
}
/**
 * Returns host identifier by node identifier.
 *
 * @param nodeId Node identifier.
 * @return Host identifier, or {@code null} if the node is not in the topology.
 */
private HostIdentifier getHostIdentifier(UUID nodeId) {
    try {
        ClusterGroup grp = ignite.cluster().forNodeId(nodeId);

        // The closure is serialized and executed on the target node, so the MAC
        // addresses collected below are those of the remote host.
        return ignite.compute(grp).call(() -> {
            Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();

            List<byte[]> macAddrs = new ArrayList<>();

            // getNetworkInterfaces() may return null when no interfaces are found.
            if (interfaces != null) {
                while (interfaces.hasMoreElements()) {
                    NetworkInterface netItf = interfaces.nextElement();

                    byte[] macAddr = netItf.getHardwareAddress();

                    // getHardwareAddress() returns null for loopback/virtual interfaces —
                    // skip those so the identifier holds only real MAC addresses.
                    if (macAddr != null)
                        macAddrs.add(macAddr);
                }
            }

            return new HostIdentifier(macAddrs.toArray(new byte[macAddrs.size()][]));
        });
    }
    catch (ClusterGroupEmptyException e) {
        // Node has left the topology; its host cannot be identified.
        return null;
    }
}
// Build the task over this node's processes and resolve the single-node cluster group addressing the target node.
LongRunningProcessTask<List<E>> task = taskSupplier.apply(nodeProcesses); ClusterGroup clusterGrp = ignite.cluster().forNodeId(nodeId);
/**
 * @return {@link IgniteCompute} instance to communicate with remote node, or {@code null}
 *      if the local JVM grid is not available.
 * @throws IllegalStateException If the node with the configured ID is not in the topology.
 */
public IgniteCompute remoteCompute() {
    // No local JVM grid to route through.
    if (locJvmGrid == null)
        return null;

    ClusterGroup grp = locJvmGrid.cluster().forNodeId(id);

    // Fixed message grammar: "Could not found" -> "Could not find".
    if (grp.nodes().isEmpty())
        throw new IllegalStateException("Could not find node with id=" + id + ".");

    return locJvmGrid.compute(grp);
}
// A null node ID means the task is executed locally; otherwise it is routed to that node via a single-node cluster group.
metas = nodeId == null ? task.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(task);
// Forwards the message to the node with the given ID over the ordered topic and returns true —
// presumably to keep the listener subscribed; confirm against the IgniteMessaging remote-listen contract.
/** {@inheritDoc} */ @Override public boolean apply(UUID nodeId, String msg) { ignite.message(ignite.cluster().forNodeId(nodeId)).send(TOPIC.ORDERED, msg); return true; } }
/**
 * Checks that a single-node cluster group built from a node ID contains exactly that node,
 * for both the local and the remote grid.
 *
 * @throws Exception If failed.
 */
@Test
public void testSynchronousExecute() throws Exception {
    UUID locId = ignite.cluster().localNode().id();
    UUID rmtId = rmtIgnite.cluster().localNode().id();

    Collection<ClusterNode> locGrpNodes = ignite.cluster().forNodeId(locId).nodes();

    assert locGrpNodes.size() == 1 : "Unexpected local group size: " + locGrpNodes.size();
    assert locId.equals(locGrpNodes.iterator().next().id());

    Collection<ClusterNode> rmtGrpNodes = ignite.cluster().forNodeId(rmtId).nodes();

    assert rmtGrpNodes.size() == 1 : "Unexpected remote group size: " + rmtGrpNodes.size();
    assert rmtId.equals(rmtGrpNodes.iterator().next().id());
}
}
// Local queries run the task in-place; otherwise it is dispatched to the target node via a single-node cluster group.
loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask);
// Waits a second, then executes TestClosure on c2 (addressed from c1 by node ID) and folds the
// result into the shared counter. NOTE(review): the commented-out spi1.failSend toggle looks like
// a leftover from the failure-injection scenario — confirm whether it should be re-enabled or removed.
@Override public void run() { doSleep(1000); //spi1.failSend = false; cnt.getAndAdd(c1.compute(c1.cluster().forNodeId(c2.cluster().localNode().id())).call(new TestClosure())); } }, 1, "hang-thread");
/**
 * Verifies cluster-wide and per-node metrics on every grid in the topology.
 *
 * @param expNodes Expected nodes.
 * @param expJobs Expected jobs number per node.
 */
private void checkMetrics0(int expNodes, Map<UUID, Integer> expJobs) {
    List<Ignite> grids = Ignition.allGrids();

    assertEquals(expNodes, grids.size());
    assertEquals(expNodes, expJobs.size());

    // Cluster-wide executed jobs is the sum of the per-node expectations.
    int jobsTotal = 0;

    for (Integer jobs : expJobs.values())
        jobsTotal += jobs;

    for (final Ignite grid : grids) {
        ClusterMetrics clusterM = grid.cluster().metrics();

        assertEquals(expNodes, clusterM.getTotalNodes());
        assertEquals(jobsTotal, clusterM.getTotalExecutedJobs());

        // Each node's own metrics must match its expected job count.
        for (Map.Entry<UUID, Integer> entry : expJobs.entrySet()) {
            ClusterGroup nodeGrp = grid.cluster().forNodeId(entry.getKey());

            assertEquals(entry.getValue(), (Integer)nodeGrp.metrics().getTotalExecutedJobs());
        }
    }
}
// Local flag set: run the task in this JVM; otherwise route it to the node with the given ID.
int[] res = loc ? task.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(task);
// Single-node cluster group resolved from the client's topology view — presumably targeting a server node, per the variable name.
ClusterGroup srvGrp = client.cluster().forNodeId(nodeId);
/**
 * Starts a mixed server/client topology and runs one job on every node,
 * tracking the expected per-node executed-jobs count.
 *
 * @throws Exception If failed.
 */
@Test
public void testMetrics() throws Exception {
    int NODES = 6;

    // First half are servers, second half are clients.
    Ignite srv0 = startGridsMultiThreaded(NODES / 2);

    client = true;

    startGridsMultiThreaded(NODES / 2, NODES / 2);

    Map<UUID, Integer> expJobs = new HashMap<>();

    for (int i = 0; i < NODES; i++)
        expJobs.put(nodeId(i), 0);

    checkMetrics(NODES, expJobs);

    for (int i = 0; i < NODES; i++) {
        UUID nodeId = nodeId(i);

        // Reuse the already-resolved node ID instead of calling nodeId(i) a second time.
        IgniteCompute c = srv0.compute(srv0.cluster().forNodeId(nodeId));

        c.call(new DummyCallable(null));

        expJobs.put(nodeId, 1);
    }
}
/**
 * Checks that asynchronous compute operations over an empty cluster group fail their futures.
 *
 * @throws Exception If failed.
 */
@Test
public void testAsync() throws Exception {
    // A random UUID matches no node, producing an empty group.
    ClusterGroup emptyGrp = ignite(0).cluster().forNodeId(UUID.randomUUID());

    assertEquals(0, emptyGrp.nodes().size());

    IgniteCompute emptyComp = ignite(0).compute(emptyGrp);

    checkFutureFails(emptyComp.affinityRunAsync(DEFAULT_CACHE_NAME, 1, new FailRunnable()));
    checkFutureFails(emptyComp.applyAsync(new FailClosure(), new Object()));
    checkFutureFails(emptyComp.affinityCallAsync(DEFAULT_CACHE_NAME, 1, new FailCallable()));
    checkFutureFails(emptyComp.broadcastAsync(new FailCallable()));
}
// Local queries run the task in-place; otherwise it is dispatched to the target node via a single-node cluster group.
loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask);
// A random UUID matches no node in the topology, so the resulting cluster group is empty.
ClusterGroup empty = ignite(0).cluster().forNodeId(UUID.randomUUID());
// Broadcast a runnable asynchronously to the two-node group addressed by the given node IDs
// (anonymous runnable body continues beyond this fragment).
IgniteFuture<Void> fut = igniteSrv.compute(igniteSrv.cluster().forNodeId(nodeId0, nodeId(1))) .broadcastAsync( new IgniteRunnable() {
/** * Fetch next results page. * * @throws SQLException On error. */ private void fetchPage() throws SQLException { JdbcConnection conn = (JdbcConnection)stmt.getConnection(); Ignite ignite = conn.ignite(); UUID nodeId = conn.nodeId(); boolean loc = nodeId == null; boolean updateMetadata = tbls == null; // Connections from new clients send queries with new tasks, so we have to continue in the same manner JdbcQueryTask qryTask = JdbcQueryTaskV3.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), null,true, loc, null, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy(), updateMetadata, false); try { JdbcQueryTaskResult res = loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); finished = res.isFinished(); it = res.getRows().iterator(); if (updateMetadata) { tbls = res.getTbls(); cols = res.getCols(); types = res.getTypes(); } } catch (Exception e) { throw convertToSqlException(e, "Failed to query Ignite."); } }