/** {@inheritDoc} */
@Override public boolean isNodeInBaseline() {
    ClusterNode node = localNode();

    // Client and daemon nodes are never part of the baseline topology.
    if (node.isClient() || node.isDaemon())
        return false;

    DiscoveryDataClusterState state = ctx.state().clusterState();

    // Without an established baseline topology no node can belong to it.
    if (!state.hasBaselineTopology())
        return false;

    return CU.baselineNode(node, state);
}
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
    // Start a regular server node first.
    ignite = startGrid(0);

    // The flag must be set BEFORE the second grid starts so its
    // configuration marks it as a daemon node.
    daemonNode = true;

    daemon = startGrid(1);

    // Sanity check: the second node actually joined as a daemon.
    assert ((IgniteKernal)daemon).localNode().isDaemon();
}
@Override public void run(int idx) throws Exception {
    // For every key verify the local entry state: nodes mapped as
    // primary/backup must hold a deleted entry, all others must hold none.
    for (int i = 0; i < cnt; i++) {
        String key = String.valueOf(i);

        GridCacheContext<String, Integer> ctx =
            ((IgniteKernal)ignite).<String, Integer>internalCache(cacheName).context();

        // For a near cache peek into the underlying DHT cache.
        GridCacheEntryEx entry = ctx.isNear() ? ctx.near().dht().peekEx(key) : ctx.cache().peekEx(key);

        if (ignite.affinity(cacheName).mapKeyToPrimaryAndBackups(key).contains(((IgniteKernal)ignite).localNode())) {
            assertNotNull(entry);
            assertTrue(entry.deleted());
        }
        else
            assertNull(entry);
    }
}
}
/** {@inheritDoc} */
@Override public void run(int idx) throws Exception {
    // For every key verify the local entry state: nodes mapped as
    // primary/backup must hold a deleted entry, all others must hold none.
    for (int i = 0; i < cnt; i++) {
        String key = String.valueOf(i);

        GridCacheContext<String, Integer> ctx =
            ((IgniteKernal)ignite).<String, Integer>internalCache(DEFAULT_CACHE_NAME).context();

        // For a near cache peek into the underlying DHT cache.
        GridCacheEntryEx entry = ctx.isNear() ? ctx.near().dht().peekEx(key) : ctx.cache().peekEx(key);

        if (ignite.affinity(DEFAULT_CACHE_NAME).mapKeyToPrimaryAndBackups(key).contains(((IgniteKernal)ignite).localNode())) {
            assertNotNull(entry);
            assertTrue(entry.deleted());
        }
        else
            assertNull(entry);
    }
}
}
int key1 = threadNum; log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key1 + ']'); log.info(">>> Performs sleep. [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ']'); int key2 = threadNum + 1; log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key2=" + key2 + ']');
@Override public void run() {
    // Each worker thread operates on its own node.
    int threadNum = threadCnt.getAndIncrement();

    Ignite ignite = ignite(threadNum);

    IgniteCache<Integer, Integer> cache = ignite.cache(CACHE);

    try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, timeout, 0)) {
        // All threads update the same key to force a lock conflict.
        int key = 42;

        if (log.isDebugEnabled())
            log.debug(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key + ']');

        cache.put(key, 0);

        // Hold the lock past the tx timeout so the other thread's request expires.
        barrier.await(timeout + 1000, TimeUnit.MILLISECONDS);

        tx.commit();
    }
    catch (Exception e) {
        // Record which failure kinds were observed; the enclosing test asserts on them.
        if (hasCause(e, TransactionTimeoutException.class))
            timedOut.set(true);

        if (hasCause(e, TransactionDeadlockException.class))
            deadlock.set(true);
    }
}
}, 2, "tx-thread");
/** {@inheritDoc} */
@Override public void run(int idx) throws Exception {
    // The transaction manager must hold no per-thread tx mappings at this point.
    assertEquals(0, ((IgniteKernal)ignite).<String, Integer>internalCache(DEFAULT_CACHE_NAME).context().tm().idMapSize());

    IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);

    ClusterNode node = ((IgniteKernal)ignite).localNode();

    for (int k = 0; k < size; k++) {
        // Only nodes mapped as primary or backup are expected to hold the value locally.
        if (affinity(cache).isPrimaryOrBackup(node, k))
            assertEquals("Check failed for node: " + node.id(), k,
                cache.localPeek(k, CachePeekMode.ONHEAP, CachePeekMode.OFFHEAP));
    }
}
}
/**
 * Prints partition distribution for node.
 *
 * @param node Node to detect partitions for.
 */
private void printDistribution(IgniteKernal node) {
    List<Integer> primaryParts = partitions(node, PARTITION_PRIMARY);
    List<Integer> backupParts = partitions(node, PARTITION_BACKUP);
    List<Integer> readerParts = partitions(node, PARTITION_READER);

    info(">>> Partitions distribution calculated [nodeId=" + node.localNode().id() +
        ", primary=" + primaryParts +
        ", backup=" + backupParts +
        ", reader=" + readerParts + ']');
}
/**
 * Evaluates primary and backup keys for the primary node.
 *
 * @param primaryKeyStart Value from which to start searching for a primary key.
 * @param backupKeyStart Value from which to start searching for a backup key.
 * @return Pair of results: the first element is the found primary key, the second is the found backup key.
 */
private IgnitePair<Integer> evaluatePrimaryAndBackupKeys(final int primaryKeyStart, final int backupKeyStart) {
    int primaryKey = primaryKeyStart;
    int backupKey = backupKeyStart;

    // Scan upwards until a key primary on the primary() node is found.
    // NOTE(review): this loop is unbounded — presumably a primary key is
    // always reachable for this topology; confirm.
    while (!client().affinity(CACHE_NAME).isPrimary(((IgniteKernal)primary()).localNode(), primaryKey))
        primaryKey++;

    // The backup scan is bounded to 100 probes; if no backup key is found
    // within the bound, the last probed (non-backup) key is returned.
    while (!client().affinity(CACHE_NAME).isBackup(((IgniteKernal)primary()).localNode(), backupKey)
        && backupKey < 100 + backupKeyStart)
        backupKey++;

    return new IgnitePair<>(primaryKey, backupKey);
}
@Override public void run() {
    int num = threadCnt.getAndIncrement();

    Ignite ignite = ignite(num);

    IgniteCache<Object, Integer> cache = ignite.cache(CACHE);

    // The first thread gets a shorter timeout so it fails first.
    try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, num == 0 ? 500 : 1500, 0)) {
        // Lock a key that is primary on the NEXT node, crossing lock
        // ownership between the worker threads.
        int key1 = primaryKey(ignite((num + 1) % txCnt).cache(CACHE));

        log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key1 + ']');

        cache.put(new TestKey(key1), 1);

        // Wait until every thread holds its first lock before requesting the
        // second key, forming a lock cycle.
        barrier.await();

        int key2 = primaryKey(cache);

        log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key2 + ']');

        cache.put(new TestKey(key2), 2);

        tx.commit();
    }
    catch (Exception e) {
        // Record observed failure causes; asserted by the enclosing test.
        timeout.compareAndSet(false, hasCause(e, TransactionTimeoutException.class));
        deadlock.compareAndSet(false, hasCause(e, TransactionDeadlockException.class));
    }
}
}, txCnt, "tx-thread");
int key1 = threadNum == 0 ? 0 : 1; log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key1 + ", cache=" + cache1.getName() + ']'); log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key2 + ", cache=" + cache2.getName() + ']');
/**
 * Checks cluster state after discovery connections are broken: the default
 * failure handler must not be invoked and the expected server nodes remain.
 *
 * @throws Exception If failed.
 */
@Test
public void testStopOnSegmentation() throws Exception {
    startGrids(NODES_CNT);

    IgniteEx ignite1 = grid(1);
    IgniteEx ignite2 = grid(2);

    // Break the current discovery connections on nodes 1 and 2.
    ((TcpDiscoverySpi)ignite1.configuration().getDiscoverySpi()).brakeConnection();
    ((TcpDiscoverySpi)ignite2.configuration().getDiscoverySpi()).brakeConnection();

    waitForTopology(2);

    assertFalse(dfltFailureHndInvoked);

    Collection<ClusterNode> nodes = ignite1.cluster().forServers().nodes();

    assertEquals(2, nodes.size());

    // Nodes 0 and 1 are expected to survive.
    assertTrue(nodes.containsAll(Arrays.asList(
        ((IgniteKernal)ignite(0)).localNode(),
        ((IgniteKernal)ignite(1)).localNode())));
}
int key1 = primaryKey(cache1); log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key1 + ", cache=" + cache1.getName() + ']'); log.info(">>> Performs put [node=" + ((IgniteKernal)ignite).localNode() + ", tx=" + tx + ", key=" + key2 + ", cache=" + cache2.getName() + ']');
if (g.localNode().order() > 1) { try { sendMessages(g, threads, duration, outputFilename);
setNodeId = grid.localNode().id(); else fail("For collocated set all items should be stored on single node.");
/**
 * @throws Exception If failed.
 */
@Test
public void testCollocation() throws Exception {
    IgniteQueue<Integer> queue = grid(0).queue("queue", 0, config(true));

    for (int i = 0; i < 1000; i++)
        assertTrue(queue.add(i));

    assertEquals(1000, queue.size());

    GridCacheContext cctx = GridTestUtils.getFieldValue(queue, "cctx");

    // Id of the node holding the queue items; must be unique for a collocated queue.
    UUID itemsNodeId = null;

    for (int i = 0; i < gridCount(); i++) {
        IgniteKernal grid = (IgniteKernal)grid(i);

        GridCacheAdapter cache = grid.context().cache().internalCache(cctx.name());

        Iterator<GridCacheMapEntry> it = cache.map().entries(cache.context().cacheId()).iterator();

        // Skip nodes that hold no entries for this cache.
        if (!it.hasNext())
            continue;

        if (itemsNodeId != null)
            fail("For collocated queue all items should be stored on single node.");

        itemsNodeId = grid.localNode().id();
    }
}}
/**
 * @param cacheName Cache name.
 * @param expDone Expected rebalance finish flag.
 */
private void checkRebalance(String cacheName, boolean expDone) {
    for (Ignite node : G.allGrids()) {
        IgniteKernal node0 = (IgniteKernal)node;

        GridCacheAdapter cache = node0.context().cache().internalCache(cacheName);

        AffinityTopologyVersion topVer = node0.context().cache().context().exchange().readyAffinityVersion();

        if (cache != null)
            assertEquals(expDone, cache.context().topology().rebalanceFinished(topVer));
        else
            // NOTE(review): the boolean result is discarded, making this branch
            // a no-op — looks like it was meant to be wrapped in an assertion
            // (e.g. assertFalse); confirm intent before changing.
            node0.context().discovery().cacheAffinityNode(node0.localNode(), cacheName);
    }
}
/**
 * @throws Exception If failed.
 */
@Test
public void testSendBadMessage() throws Exception {
    try {
        startGrids(2);

        Ignite ignite0 = grid(0);
        Ignite ignite1 = grid(1);

        // Register a handler on both nodes that always throws, so processing
        // of the test message fails deterministically.
        ((IgniteKernal)ignite0).context().cache().context().io().addCacheHandler(
            0, TestBadMessage.class, new CI2<UUID, GridCacheMessage>() {
                @Override public void apply(UUID nodeId, GridCacheMessage msg) {
                    throw new RuntimeException("Test bad message exception");
                }
            });

        ((IgniteKernal)ignite1).context().cache().context().io().addCacheHandler(
            0, TestBadMessage.class, new CI2<UUID, GridCacheMessage>() {
                @Override public void apply(UUID nodeId, GridCacheMessage msg) {
                    throw new RuntimeException("Test bad message exception");
                }
            });

        // Send the message from node 0 to node 1 and expect the failure latch
        // to be released within 5 seconds.
        ((IgniteKernal)ignite0).context().cache().context().io().send(
            ((IgniteKernal)ignite1).localNode().id(), new TestBadMessage(), (byte)2);

        boolean res = failureLatch.await(5, TimeUnit.SECONDS);

        assertTrue(res);
    }
    finally {
        stopAllGrids();
    }
}
/**
 * Verifies that the given cache is absent on the node and that every node in
 * the cluster stops reporting the node as a cache node for it.
 *
 * @param ignite Node.
 * @param cacheName Cache name.
 * @throws Exception If failed.
 */
private void checkNoCache(Ignite ignite, final String cacheName) throws Exception {
    GridCacheAdapter<Object, Object> cache = ((IgniteKernal)ignite).context().cache().internalCache(cacheName);

    assertNull("Unexpected cache on node " + ignite.name(), cache);

    final ClusterNode node = ((IgniteKernal)ignite).localNode();

    for (Ignite ignite0 : Ignition.allGrids()) {
        final GridDiscoveryManager disco = ((IgniteKernal)ignite0).context().discovery();

        if (ignite0 == ignite)
            // The node itself must already observe the change.
            assertFalse(ignite0.name(), disco.cacheNode(node, cacheName));
        else {
            // Remote nodes learn about the change asynchronously — wait up to 5s.
            assertTrue(ignite0.name(), GridTestUtils.waitForCondition(new GridAbsPredicate() {
                @Override public boolean apply() {
                    return !disco.cacheNode(node, cacheName);
                }
            }, 5000));
        }

        assertFalse(disco.cacheAffinityNode(node, cacheName));
        assertFalse(disco.cacheNearNode(node, cacheName));
    }
}
if (cache.affinity().isPrimaryOrBackup(grid.localNode(), key)) { if (cache instanceof GridNearCacheAdapter) cache = ((GridNearCacheAdapter<Object, Object>)cache).dht();