// NOTE(review): fragment of a multi-threaded runner invocation whose opening is outside
// this view; the trailing "}, CLIENTS, \"start-client\");" closes that enclosing call.
@Override public Void call() throws Exception {
    // Block until the shared latch opens — presumably so all client starts happen
    // concurrently; confirm with the code that counts down the latch.
    latch.await();

    // Start a client grid with a unique, atomically incremented index.
    Ignite g = startGrid("client-" + clientIdx.getAndIncrement());

    // Record the started client's node id for later verification.
    clientNodeIds.add(g.cluster().localNode().id());

    return null;
} }, CLIENTS, "start-client");
/**
 * Counts how many primary partitions of the given cache are mapped to server data nodes.
 *
 * @param cacheName Cache name.
 * @return Total number of primary partitions across all data nodes of the cache.
 */
private int mapped(String cacheName) {
    ClusterGroup srvs = ignite.cluster().forServers();

    int total = 0;

    for (ClusterNode node : srvs.forDataNodes(cacheName).nodes())
        total += ignite.affinity(cacheName).primaryPartitions(node).length;

    return total;
}
/** {@inheritDoc} */
@Override public boolean apply(UUID nodeId, Object msg) {
    ignite.log().info("Received message [nodeId=" + nodeId + ", locNodeId=" +
        ignite.cluster().localNode().id() + ", msg=" + msg + ']');

    ConcurrentMap<String, AtomicInteger> map = ignite.cluster().nodeLocalMap();

    // Atomically create the counter on first use; replaces the manual
    // get / putIfAbsent / old-value check dance with the equivalent idiom.
    AtomicInteger cnt = map.computeIfAbsent("msgCnt", k -> new AtomicInteger(0));

    cnt.incrementAndGet();

    return true;
} }
/** * Sends optional message. If message is {@code null} - it's no-op. * * @param nodeId ID of the node to send message to. * @param respMsg Message to send. * @throws IgniteException Thrown in case of any errors. */ private void send(UUID nodeId, @Nullable Object respMsg) throws IgniteException { assert nodeId != null; if (respMsg != null) { ClusterNode node = ignite.cluster().node(nodeId); if (node != null) ignite.message(ignite.cluster().forNode(node)).send(null, respMsg); // Can still fail. else throw new IgniteException("Failed to send message since destination node has " + "left topology (ignoring) [nodeId=" +nodeId + ", respMsg=" + respMsg + ']'); } }
/** * Tries to set the global cleanup node id to current node. * * @param grid Grid. * @param metaCache Meta cache. * * @return True if successfully set the flag indicating that current node performs the cleanup; otherwise false. */ private boolean trySetGlobalCleanupFlag(Ignite grid, final Cache<CleanupNodeId, UUID> metaCache) { final UUID localNodeId = grid.cluster().localNode().id(); while (true) { // Get the node performing cleanup. UUID nodeId = metaCache.get(CLEANUP_NODE_ID); if (nodeId == null) { if (metaCache.putIfAbsent(CLEANUP_NODE_ID, localNodeId)) return true; // Successfully reserved cleanup to local node. // Failed putIfAbsent: someone else may have started cleanup. Retry the check. continue; } if (nodeId.equals(localNodeId)) return false; // Current node already performs cleanup. if (grid.cluster().node(nodeId) != null) return false; // Another node already performs cleanup and is alive. // Node that performs cleanup has disconnected. if (metaCache.replace(CLEANUP_NODE_ID, nodeId, localNodeId)) return true; // Successfully replaced disconnected node id with our id. // Replace failed: someone else started cleanup. return false; } }
/**
 * Verifies that server and client cluster groups each contain exactly the expected nodes.
 *
 * @throws Exception If failed.
 */
@Test
public void testClientServer() throws Exception {
    ClusterGroup srv = ignite.cluster().forServers();

    assertEquals(2, srv.nodes().size());
    assertTrue(srv.nodes().contains(ignite(0).cluster().localNode()));
    assertTrue(srv.nodes().contains(ignite(1).cluster().localNode()));

    ClusterGroup cli = ignite.cluster().forClients();

    // Fixed copy-paste bug: the original re-checked srv.nodes().size() here.
    assertEquals(2, cli.nodes().size());
    assertTrue(cli.nodes().contains(ignite(2).cluster().localNode()));
    assertTrue(cli.nodes().contains(ignite(3).cluster().localNode()));
}
/** {@inheritDoc} */
@Override public Collection<ClusterNode> call() throws Exception {
    // Return the current topology snapshot.
    Collection<ClusterNode> nodes = cluster().nodes();

    return nodes;
} }
/**
 * Verifies that {@code forNodeId} projections for the local and remote nodes each
 * contain exactly the expected single node.
 *
 * @throws Exception If failed.
 */
@Test
public void testSynchronousExecute() throws Exception {
    UUID nodeId = ignite.cluster().localNode().id();
    UUID rmtNodeId = rmtIgnite.cluster().localNode().id();

    Collection<ClusterNode> locNodes = ignite.cluster().forNodeId(nodeId).nodes();

    // Use explicit assertions (consistent with sibling tests) so the checks
    // run regardless of whether JVM assertions (-ea) are enabled.
    assertEquals(1, locNodes.size());
    assertEquals(nodeId, locNodes.iterator().next().id());

    Collection<ClusterNode> rmtNodes = ignite.cluster().forNodeId(rmtNodeId).nodes();

    assertEquals(1, rmtNodes.size());
    assertEquals(rmtNodeId, rmtNodes.iterator().next().id());
} }
/**
 * Starts one server and one client with always-false node filters on two caches and
 * checks topology sizes and that the caches are visible but empty from the client.
 *
 * @throws Exception If failed.
 */
@Test
public void testScanQuery() throws Exception {
    Ignite server = startGrid(0);

    client = true;

    ccfgs = new CacheConfiguration[] {
        new CacheConfiguration("test-cache-replicated").setCacheMode(REPLICATED)
            .setNodeFilter(new AlwaysFalseCacheFilter()),
        new CacheConfiguration("test-cache-partitioned").setCacheMode(PARTITIONED)
            .setNodeFilter(new AlwaysFalseCacheFilter())
    };

    Ignite client = startGrid(1);

    // Both nodes observe a 2-node topology: one server, one client.
    assertEquals(2, server.cluster().nodes().size());
    assertEquals(1, server.cluster().forServers().nodes().size());
    assertEquals(1, server.cluster().forClients().nodes().size());

    assertEquals(2, client.cluster().nodes().size());
    assertEquals(1, client.cluster().forServers().nodes().size());
    assertEquals(1, client.cluster().forClients().nodes().size());

    for (CacheConfiguration cacheCfg : ccfgs) {
        IgniteCache<Object, Object> cache = client.cache(cacheCfg.getName());

        assertNotNull(cache);
        assertNotNull(cache.iterator());
        assertFalse(cache.iterator().hasNext());
    }
}
/**
 * Formats Server Address specification so that TensorFlow accepts it.
 *
 * @param ignite Ignite instance.
 * @return Formatted server address specification ("host:port").
 */
public String format(Ignite ignite) {
    Collection<String> hostNames = ignite.cluster().forNodeId(nodeId).hostNames();

    // Take the first reported host name for the target node.
    String host = hostNames.iterator().next();

    return host + ":" + port;
}
/**
 * Remove data from local cache by Dataset ID.
 *
 * @param ignite Ignite instance.
 * @param datasetId Dataset ID.
 */
public static void removeData(Ignite ignite, UUID datasetId) {
    String key = String.format(DATA_STORAGE_KEY_TEMPLATE, datasetId);

    ignite.cluster().nodeLocalMap().remove(key);
}
/** {@inheritDoc} */
@Override public boolean apply(Event evt) {
    // Identity hash of this listener's class loader - presumably compared on the
    // receiving side to detect which class loader deployed the code; confirm with consumer.
    int[] payload = { System.identityHashCode(getClass().getClassLoader()) };

    try {
        ignite.message(ignite.cluster().forRemotes()).send(null, payload);
    }
    catch (IgniteException e) {
        throw new RuntimeException(e);
    }

    return true;
} }
/**
 * Server sends a message and client receives it.
 *
 * @param async Async message send flag.
 * @throws Exception If failed.
 */
private void serverClientMessage(boolean async) throws Exception {
    Ignite srv = grid(SERVER_NODE_IDX);

    ClusterGroup clients = srv.cluster().forClients();

    assert !clients.nodes().isEmpty();

    registerListenerAndSendMessages(srv, clients, async);
}
// NOTE(review): mid-method fragment - the enclosing test method starts outside this view.
// The expected client/server counts change between groups of assertions, presumably
// because nodes join/leave in code elided between them; confirm against the full method.
assertEquals(0, node.cluster().forClients().nodes().size());
assertEquals(1, node.cluster().forServers().nodes().size());
assertEquals(1, node.cluster().forClients().nodes().size());
assertEquals(1, node.cluster().forServers().nodes().size());
assertEquals(2, node.cluster().forClients().nodes().size());
assertEquals(2, node.cluster().forServers().nodes().size());
assertEquals(1, node.cluster().forClients().nodes().size());
assertEquals(2, node.cluster().forServers().nodes().size());
assertEquals(1, node.cluster().forClients().nodes().size());
assertEquals(1, node.cluster().forServers().nodes().size());
/**
 * Activates grid if necessary and wait for partition map exchange.
 *
 * @throws InterruptedException If the wait for partition map exchange is interrupted.
 */
private void afterGridStarted() throws InterruptedException {
    G.allGrids().stream()
        // Bug fix: cluster().node() returns the first node of the whole topology,
        // which is the same for every grid instance; localNode() is needed to
        // select a grid whose own node is a server.
        .filter(g -> !g.cluster().localNode().isClient())
        .findAny()
        .filter(g -> !g.cluster().active())
        .ifPresent(g -> g.cluster().active(true));

    awaitPartitionMapExchange();
}
/**
 * Registers a listener and sends ordered messages to the remote nodes,
 * or to the local node when the grid has a single node.
 *
 * @throws Exception If fail.
 */
private void orderedMessage() throws Exception {
    Ignite srv = grid(SERVER_NODE_IDX);

    ClusterGroup target = gridCount() > 1
        ? srv.cluster().forRemotes()
        : srv.cluster().forLocal();

    assert !target.nodes().isEmpty();

    registerListenerAndSendOrderedMessages(srv, target);
}