/**
 * Installs a supply-message block for the given cache on every started grid.
 *
 * @param cacheName Cache name.
 */
private void blockSupplySend(String cacheName) {
    G.allGrids().forEach(node -> blockSupplySend(TestRecordingCommunicationSpi.spi(node), cacheName));
}
/** {@inheritDoc} */
@Override protected void execute() {
    // Arm the blocking predicate on every baseline server before running the blocked action.
    for (IgniteEx srv : baseline)
        TestRecordingCommunicationSpi.spi(srv).blockMessages(this::blockMessage);

    block.run();
}
/**
 * Releases all blocked messages on every started grid.
 */
private void stopBlock() {
    G.allGrids().forEach(node -> TestRecordingCommunicationSpi.spi(node).stopBlock());
}
/** {@inheritDoc} */
@Override void stop() throws Exception {
    // Unblock every baseline server before delegating the shutdown to the base class.
    for (IgniteEx srv : baseline)
        TestRecordingCommunicationSpi.spi(srv).stopBlock();

    super.stop();
}
}
/**
 * Blocks full partition map messages sent by the coordinator for the given exchange topology version.
 *
 * @param crd Exchange coordinator.
 * @param topVer Exchange topology version.
 */
private void blockExchangeFinish(Ignite crd, long topVer) {
    AffinityTopologyVersion blockedVer = new AffinityTopologyVersion(topVer);

    TestRecordingCommunicationSpi.spi(crd).blockMessages((node, msg) -> {
        if (!(msg instanceof GridDhtPartitionsFullMessage))
            return false;

        GridDhtPartitionsFullMessage fullMsg = (GridDhtPartitionsFullMessage)msg;

        return fullMsg.exchangeId() != null && fullMsg.exchangeId().topologyVersion().equals(blockedVer);
    });
}
/** * Method checks that there were no rebalance for all caches (excluding sys cache). */ private void checkNoRebalanceAfterRecovery() { int sysCacheGroupId = CU.cacheId(GridCacheUtils.UTILITY_CACHE_NAME); List<Ignite> nodes = G.allGrids(); for (final Ignite node : nodes) { TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(node); Set<Integer> mvccCaches = ((IgniteEx) node).context().cache().cacheGroups().stream() .flatMap(group -> group.caches().stream()) .filter(cache -> cache.config().getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) .map(GridCacheContext::groupId) .collect(Collectors.toSet()); List<Integer> rebalancedGroups = spi.recordedMessages(true).stream() .map(msg -> (GridDhtPartitionDemandMessage) msg) .map(GridCacheGroupIdMessage::groupId) .filter(grpId -> grpId != sysCacheGroupId) //TODO: remove following filter when failover for MVCC will be fixed. .filter(grpId -> !mvccCaches.contains(grpId)) .distinct() .collect(Collectors.toList()); Assert.assertTrue("There was unexpected rebalance for some groups" + " [node=" + node.name() + ", groups=" + rebalancedGroups + ']', rebalancedGroups.isEmpty()); } }
/** * @throws Exception If failed. */ @Params(baseline = 9, atomicityMode = TRANSACTIONAL, cacheMode = REPLICATED) @Test public void testStopBaselineTransactionalReplicated() throws Exception { AtomicInteger cntDownCntr = new AtomicInteger(0); doTest( asMessagePredicate(discoEvt -> discoEvt.type() == EventType.EVT_NODE_LEFT), () -> { IgniteEx node = baseline.get(baseline.size() - cntDownCntr.get() - 1); TestRecordingCommunicationSpi.spi(node).stopBlock(); cntDownCntr.incrementAndGet(); for (int i = 0; i < cntDownCntr.get(); i++) cntFinishedReadOperations.countDown(); // This node and previously stopped nodes as well. stopGrid(node.name()); } ); }
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(grid(1));
/**
 * @throws Exception If failed.
 */
@Params(baseline = 4, atomicityMode = TRANSACTIONAL, cacheMode = REPLICATED)
@Test
public void testRestartBaselineTransactionalReplicated() throws Exception {
    doTest(
        asMessagePredicate(evt -> evt.type() == EventType.EVT_NODE_JOINED),
        () -> {
            IgniteEx restarted = baseline.get(baseline.size() - 1);

            TestRecordingCommunicationSpi.spi(restarted).stopBlock();

            stopGrid(restarted.name());

            for (int i = 0; i < baselineServersCount() - 2; i++)
                cntFinishedReadOperations.countDown();

            startGrid(restarted.name());
        }
    );
}
/**
 * Checks whether communication messages were recorded on every started grid.
 *
 * @param exp If {@code true} there should be recorded messages.
 */
private void checkRecordedMessages(boolean exp) {
    for (Ignite node : G.allGrids()) {
        List<Object> recorded = TestRecordingCommunicationSpi.spi(node).recordedMessages(false);

        boolean empty = F.isEmpty(recorded);

        if (exp)
            assertFalse(empty);
        else
            assertTrue(empty);
    }
}
/** * Blocks sending full message from coordinator to non-coordinator node. * @param from Coordinator node. * @param to Non-coordinator node. */ private void blockSendingFullMessage(IgniteEx from, IgniteEx to) { // Block FullMessage for newly joined nodes. TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(from); // Delay sending full messages (without exchange id). spi.blockMessages((node, msg) -> { if (msg instanceof GridDhtPartitionsFullMessage) { GridDhtPartitionsFullMessage fullMsg = (GridDhtPartitionsFullMessage) msg; if (fullMsg.exchangeId() != null && node.order() == to.localNode().order()) { log.warning("Blocked sending " + msg + " to " + to.localNode()); return true; } } return false; }); }
/**
 * Tests that idle verify print partitions info when node failing.
 *
 * @throws Exception If failed.
 */
@Test
public void testCacheIdleVerifyDumpWhenNodeFailing() throws Exception {
    Ignite ignite = startGrids(3);

    Ignite unstable = startGrid("unstable");

    ignite.cluster().active(true);

    createCacheAndPreload(ignite, 100);

    // Block the unstable node's job responses to every stable node.
    for (int i = 0; i < 3; i++)
        TestRecordingCommunicationSpi.spi(unstable).blockMessages(GridJobExecuteResponse.class, getTestIgniteInstanceName(i));

    injectTestSystemOut();

    IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
        assertEquals(EXIT_CODE_OK, execute("--cache", "idle_verify", "--dump"));
    });

    TestRecordingCommunicationSpi.spi(unstable).waitForBlocked();

    UUID unstableNodeId = unstable.cluster().localNode().id();

    // Kill the unstable node while the verify command is waiting on it.
    unstable.close();

    fut.get();

    checkExceptionMessageOnReport(unstableNodeId);
}
Ignite backup = backupNode(key, DEFAULT_CACHE_NAME); TestRecordingCommunicationSpi backupSpi = TestRecordingCommunicationSpi.spi(backup); backupSpi.blockMessages(GridDhtTxPrepareResponse.class, primary.name());
/**
 * Test checks that delayed full messages are processed correctly in case of changed coordinator.
 *
 * @throws Exception If failed.
 */
@Test
public void testDelayedFullMessageReplacedIfCoordinatorChanged() throws Exception {
    spiFactory = TestRecordingCommunicationSpi::new;

    IgniteEx crd = startGrid("crd");
    IgniteEx newCrd = startGrid(1);
    IgniteEx problemNode = startGrid(2);

    crd.cluster().active(true);

    awaitPartitionMapExchange();

    blockSendingFullMessage(crd, problemNode);

    IgniteInternalFuture joinFut = GridTestUtils.runAsync(() -> startGrid(3));
    joinFut.get();

    U.sleep(5000);

    blockSendingFullMessage(newCrd, problemNode);

    IgniteInternalFuture stopCrdFut = GridTestUtils.runAsync(() -> stopGrid("crd"));
    stopCrdFut.get();

    U.sleep(5000);

    // Release the delayed full messages from the new coordinator.
    TestRecordingCommunicationSpi.spi(newCrd).stopBlock(true);

    awaitPartitionMapExchange();
}
/**
 * Checks client cache start completes after the coordinator fails during affinity fetch.
 *
 * @param atomicityMode Cache atomicity mode.
 * @throws Exception If failed.
 */
private void clientStartCoordinatorFails(CacheAtomicityMode atomicityMode) throws Exception {
    Ignite srv0 = startGrids(3);

    final int KEYS = 500;

    IgniteCache<Object, Object> cache = srv0.createCache(cacheConfiguration(DEFAULT_CACHE_NAME, atomicityMode, 1));

    for (int i = 0; i < KEYS; i++)
        cache.put(i, i);

    client = true;

    final Ignite c = startGrid(3);

    // Keep the client waiting for affinity by blocking the coordinator's response.
    TestRecordingCommunicationSpi.spi(srv0).blockMessages(GridDhtAffinityAssignmentResponse.class, c.name());

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Void>() {
        @Override public Void call() throws Exception {
            c.cache(DEFAULT_CACHE_NAME);

            return null;
        }
    }, "start-cache");

    U.sleep(1000);

    assertFalse(fut.isDone());

    // Kill the coordinator; cache start must still complete via the remaining nodes.
    stopGrid(0);

    fut.get();

    cache = c.cache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < KEYS; i++) {
        assertEquals(i, cache.get(i));

        cache.put(i, i + 1);

        assertEquals(i + 1, cache.get(i));
    }
}
}); TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(ignite(1));
}); TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(ignite(1));