/** {@inheritDoc} */ @Override public boolean apply() { return g0.cluster().nodes().size() == 1; } }, getTestTimeout());
/** {@inheritDoc} */
@Override protected VisorBaselineTaskResult run(@Nullable Void arg) throws IgniteException {
    IgniteClusterEx cluster = ignite.cluster();

    // Reuse the local cluster reference consistently instead of calling
    // ignite.cluster() a second time for the activation flag.
    return new VisorBaselineTaskResult(
        cluster.active(),
        cluster.topologyVersion(),
        cluster.currentBaselineTopology(),
        cluster.forServers().nodes()
    );
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    // Drop any events left over from a previous test run.
    afterPutEvts.clear();
    afterRmvEvts.clear();

    // Give every node a fresh blocking queue for both put and remove events.
    for (int nodeIdx = 0; nodeIdx < NODES; nodeIdx++) {
        afterPutEvts.put(grid(nodeIdx).cluster().localNode().id(),
            new BlockingArrayQueue<Cache.Entry<TestKey, TestValue>>());

        afterRmvEvts.put(grid(nodeIdx).cluster().localNode().id(),
            new BlockingArrayQueue<Cache.Entry<TestKey, TestValue>>());
    }
}
/**
 * Set baseline by topology version.
 *
 * @param targetVer Target topology version.
 * @return New baseline.
 * @throws IllegalArgumentException If the requested version has not been reached yet.
 */
private VisorBaselineTaskResult version(long targetVer) {
    IgniteClusterEx cluster = ignite.cluster();

    long curTopVer = cluster.topologyVersion();

    // A baseline can only be pinned to a topology version the cluster has already reached.
    if (targetVer > curTopVer)
        throw new IllegalArgumentException("Topology version is ahead of time: " + targetVer);

    cluster.setBaselineTopology(targetVer);

    return collect();
}
/**
 * Collect baseline topology command result.
 *
 * @return Baseline descriptor.
 */
private GridBaselineCommandResponse currentState() {
    IgniteClusterEx cluster = ctx.grid().cluster();

    // Snapshot the live server nodes before assembling the response.
    Collection<? extends BaselineNode> srvNodes = cluster.forServers().nodes();

    return new GridBaselineCommandResponse(
        cluster.active(),
        cluster.topologyVersion(),
        currentBaseLine(),
        srvNodes
    );
}
/** * @throws Exception if failed. */ @Test public void testRebalancingWithTtlExpirable() throws Exception { IgniteEx srv = startGrid(0); srv.cluster().active(true); fillCache(srv.cache(CACHE_NAME)); srv = startGrid(1); //causes rebalancing start srv.cluster().setBaselineTopology(srv.cluster().topologyVersion()); final IgniteCache<Integer, byte[]> cache = srv.cache(CACHE_NAME); pringStatistics((IgniteCacheProxy)cache, "After rebalancing start"); waitAndCheckExpired(cache); stopAllGrids(); }
/** {@inheritDoc} */
@Override protected Collection<VisorServiceDescriptor> run(final Void arg) {
    // An inactive cluster exposes no services.
    if (!ignite.cluster().active())
        return new ArrayList<>();

    Collection<VisorServiceDescriptor> descriptors = new ArrayList<>();

    for (ServiceDescriptor desc : ignite.services().serviceDescriptors())
        descriptors.add(new VisorServiceDescriptor(desc));

    return descriptors;
}
// NOTE(review): the statements below look like excerpts from several different
// command handlers concatenated together — code after the return would be
// unreachable in any single method. TODO confirm the intended structure.
ctx.grid().cluster().setBaselineTopology(nodes); // Replace the current baseline with the supplied node set.
ctx.grid().cluster().disableWal(reader.readString()); // Disable WAL for the cache named in the stream.
ctx.grid().cluster().enableWal(reader.readString()); // Re-enable WAL for the cache named in the stream.
ctx.grid().cluster().setTxTimeoutOnPartitionMapExchange(reader.readLong()); // Set PME tx timeout from the stream.
return ctx.grid().cluster().isWalEnabled(reader.readString()) ? TRUE : FALSE;
/**
 * Verifies that entries written while the local WAL is disabled (during
 * rebalancing to a newly joined baseline node) are cleared after a full
 * cluster restart.
 *
 * @throws Exception If failed.
 */
@Test public void testDataClearedAfterRestartWithDisabledWal() throws Exception {
    // Scenario is not supported under forced MVCC — see linked issue.
    Assume.assumeFalse("https://issues.apache.org/jira/browse/IGNITE-10421", MvccFeatureChecker.forcedMvcc());

    Ignite ignite = startGrid(0);

    ignite.cluster().active(true);

    IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);

    int keysCnt = getKeysCount();

    // Populate the cache before the second node joins.
    for (int k = 0; k < keysCnt; k++)
        cache.put(k, k);

    IgniteEx newIgnite = startGrid(1);

    newIgnite.cluster().setBaselineTopology(2);

    // Await fully exchange complete.
    awaitExchange(newIgnite);

    CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();

    // While the joining node rebalances, its local WAL is expected to be off.
    assertFalse(grpCtx.localWalEnabled());

    // Restart the whole cluster, bringing node 1 up first this time.
    stopGrid(1);
    stopGrid(0);

    newIgnite = startGrid(1);

    newIgnite.cluster().active(true);

    newIgnite.cluster().setBaselineTopology(newIgnite.cluster().nodes());

    cache = newIgnite.cache(DEFAULT_CACHE_NAME);

    // Data written while WAL was disabled must not survive the restart.
    for (int k = 0; k < keysCnt; k++)
        assertFalse("k=" + k +", v=" + cache.get(k), cache.containsKey(k));
}
/**
 * Collects the current server nodes as a baseline-node collection.
 *
 * @return Mutable copy of the server node set, typed as baseline nodes.
 */
private Collection<BaselineNode> baselineNodes() {
    Collection<ClusterNode> srvNodes = cluster().forServers().nodes();

    // Typed copy-constructor replaces the raw ArrayList + manual copy loop
    // of the original (ClusterNode is a BaselineNode subtype).
    return new ArrayList<BaselineNode>(srvNodes);
}
/**
 * Parses a REST response and asserts that the reported baseline matches the
 * expected sizes and the actual cluster state of grid 0.
 *
 * @param content Content to check.
 * @param baselineSz Expected baseline size.
 * @param srvsSz Expected server nodes count.
 * @throws IOException If the JSON content cannot be parsed.
 */
private void assertBaseline(String content, int baselineSz, int srvsSz) throws IOException {
    assertNotNull(content);
    assertFalse(content.isEmpty());

    JsonNode node = JSON_MAPPER.readTree(content);

    // Envelope checks: success status, no error, session token present iff security is on.
    assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt());
    assertTrue(node.get("error").isNull());
    assertNotSame(securityEnabled(), node.get("sessionToken").isNull());

    JsonNode res = node.get("response");

    assertFalse(res.isNull());

    GridBaselineCommandResponse baseline = JSON_MAPPER.treeToValue(res, GridBaselineCommandResponse.class);

    // Payload checks against the live cluster state of grid 0.
    assertTrue(baseline.isActive());
    assertEquals(grid(0).cluster().topologyVersion(), baseline.getTopologyVersion());
    assertEquals(baselineSz, baseline.getBaseline().size());
    assertEqualsCollections(nodeConsistentIds(grid(0).cluster().currentBaselineTopology()), baseline.getBaseline());
    assertEquals(srvsSz, baseline.getServers().size());
    assertEqualsCollections(nodeConsistentIds(grid(0).cluster().nodes()), baseline.getServers());
}
/**
 * Verifies the cluster continuously reports itself active on every node
 * while a baseline change is in progress on another thread.
 *
 * @throws Exception if failed.
 */
@Test public void testClusterActiveWhileBaselineChanging() throws Exception {
    startGrids(NODE_COUNT);

    IgniteEx ig = grid(0);

    ig.cluster().active(true);

    assertTrue(ig.cluster().active());

    // Extra node joins outside the baseline.
    startGrid(NODE_COUNT);

    // Change the baseline asynchronously after a short delay.
    IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
        try {
            U.sleep(100);
        }
        catch (IgniteInterruptedCheckedException e) {
            e.printStackTrace();
        }
        ig.cluster().setBaselineTopology(NODE_COUNT + 1);
    });

    // Busy-check: the active state must never flicker while the baseline changes.
    while (!fut.isDone()) {
        assertTrue(grid(0).cluster().active());
        assertTrue(grid(0).context().state().publicApiActiveState(false));
        assertTrue(grid(NODE_COUNT).cluster().active());
        assertTrue(grid(NODE_COUNT).context().state().publicApiActiveState(false));
    }

    assertNull(String.valueOf(fut.error()), fut.error());

    // The new node must now be part of the baseline.
    assertEquals(NODE_COUNT + 1, ig.cluster().currentBaselineTopology().size());
}
crd.cluster().active(true); crd.cluster().active(true); crd.cluster().disableWal(CACHE_NAME); crd.cluster().enableWal(CACHE_NAME); .walRebalanceVersions(grpId); Assert.assertFalse(topVers.contains(ignite.cluster().topologyVersion())); .walRebalanceVersions(grpId); Assert.assertTrue(topVers.contains(ignite.cluster().topologyVersion()));
/**
 * Set new baseline.
 *
 * @param baselineTop Collection of baseline node.
 * @return Baseline descriptor.
 */
private VisorBaselineTaskResult set0(Collection<BaselineNode> baselineTop) {
    // Apply the supplied baseline, then report the resulting cluster state.
    ignite.cluster().setBaselineTopology(baselineTop);

    return collect();
}
ignite0.cluster().active(true); log.info(String.format(">>> State #0: topology version = %d", ignite0.cluster().topologyVersion())); assertEquals(2, mxBeanCluster.getTotalBaselineNodes()); assertEquals(2, mxBeanCluster.getActiveBaselineNodes()); assertEquals(2, (baselineNodes = ignite0.cluster().currentBaselineTopology()) != null ? baselineNodes.size() : 0); log.info(String.format(">>> State #1: topology version = %d", ignite0.cluster().topologyVersion())); assertEquals(2, mxBeanCluster.getTotalBaselineNodes()); assertEquals(1, mxBeanCluster.getActiveBaselineNodes()); assertEquals(2, (baselineNodes = ignite0.cluster().currentBaselineTopology()) != null ? baselineNodes.size() : 0); log.info(String.format(">>> State #2: topology version = %d", ignite0.cluster().topologyVersion())); assertEquals(2, mxBeanCluster.getActiveBaselineNodes()); assertEquals(1, mxBeanLocalNode1.getTotalBaselineNodes()); assertEquals(2, (baselineNodes = ignite0.cluster().currentBaselineTopology()) != null ? baselineNodes.size() : 0);
@SuppressWarnings({"BusyWait"}) @Override public Object call() throws Exception { while (!done.get()) { if (lastOpChangeUp) { //need to do change down: stop node, set new BLT without it stopGrid(gridCount()); lastOpChangeUp = false; } else { startGrid(gridCount()); lastOpChangeUp = true; } grid(0).cluster().setBaselineTopology(baselineNodes(grid(0).cluster().forServers().nodes())); Thread.sleep(baselineTopChangeInterval); int c = restartCnt.incrementAndGet(); if (c % logFreq == 0) info("BaselineTopology changes: " + c); } return true; } }, 1, "restart-thread");
/**
 * @throws Exception If fail.
 */
@Test public void testWalRenameDirSimple() throws Exception {
    IgniteEx node = startGrid(1);

    node.cluster().active(true);

    IgniteCache<Object, Object> cache = node.cache(CACHE_NAME);

    for (int key = 0; key < 100; key++)
        cache.put(key, new IndexedObject(key));

    final Object consistentId = node.cluster().localNode().consistentId();

    stopGrid(1);

    // Rename the cache work directory on disk while the node is down.
    final File cacheDir = cacheDir(CACHE_NAME, consistentId.toString());

    renamed = cacheDir.renameTo(new File(cacheDir.getParent(), "cache-" + RENAMED_CACHE_NAME));

    assert renamed;

    node = startGrid(1);

    node.cluster().active(true);

    // All entries written before the rename must be readable from the renamed cache.
    cache = node.cache(RENAMED_CACHE_NAME);

    for (int key = 0; key < 100; key++)
        assertEquals(new IndexedObject(key), cache.get(key));
}
/**
 * Starts a 5-node grid, cuts connectivity to nodes 3 and 4 (optionally
 * simulating their hard failure) and verifies the remaining topology
 * shrinks to 3 server nodes.
 *
 * @param simulateNodeFailure Whether to simulate discovery-level node failure
 *      on the disconnected nodes.
 * @throws Exception If failed.
 */
private void doFailNodes(boolean simulateNodeFailure) throws Exception {
    startGrids(5);

    // NOTE(review): enabledEvents() looks like a getter whose result is
    // discarded here — possibly enableLocal(...) was intended. TODO confirm.
    grid(0).events().enabledEvents();

    failedNodes = new HashSet<>(Arrays.asList(grid(3).cluster().localNode(), grid(4).cluster().localNode()));

    CountDownLatch latch = new CountDownLatch(failedNodes.size());

    // Count down once per expected NODE_FAILED event for the target nodes.
    grid(0).events().localListen(e -> {
        DiscoveryEvent evt = (DiscoveryEvent)e;
        if (failedNodes.contains(evt.eventNode()))
            latch.countDown();
        return true;
    }, EventType.EVT_NODE_FAILED);

    compromisedNode = (TcpDiscoveryNode)grid(2).localNode();

    // Collect the addresses whose connectivity will be force-failed.
    for (int i = 3; i < 5; i++)
        failedAddrs.addAll(((TcpDiscoveryNode)grid(i).localNode()).socketAddresses());

    System.out.println(">> Start failing nodes");

    forceFailConnectivity = true;

    if (simulateNodeFailure) {
        for (int i = 3; i < 5; i++)
            ((TcpDiscoverySpi)grid(i).configuration().getDiscoverySpi()).simulateNodeFailure();
    }

    assert latch.await(waitTime(), TimeUnit.MILLISECONDS);

    // Only nodes 0-2 should remain in the server topology.
    assertEquals(3, grid(0).cluster().forServers().nodes().size());
}
/**
 * Exercises the BASELINE_SET REST command: sets the baseline by topology
 * version (growing and shrinking) and then by explicit consistent IDs,
 * checking the reported baseline/server counts after each step.
 *
 * @throws Exception If failed.
 */
@Test public void testBaselineSet() throws Exception {
    int sz = gridCount();

    assertBaseline(content(null, GridRestCommand.BASELINE_CURRENT_STATE), sz, sz);

    // New node joins but is not yet in the baseline.
    startGrid(sz);

    assertBaseline(content(null, GridRestCommand.BASELINE_CURRENT_STATE), sz, sz + 1);

    // Grow the baseline to the current topology version.
    assertBaseline(content(null, GridRestCommand.BASELINE_SET, "topVer",
        String.valueOf(grid(0).cluster().topologyVersion())), sz + 1, sz + 1);

    stopGrid(sz);

    assertBaseline(content(null, GridRestCommand.BASELINE_CURRENT_STATE), sz + 1, sz);

    // Shrink the baseline back after the node left.
    assertBaseline(content(null, GridRestCommand.BASELINE_SET, "topVer",
        String.valueOf(grid(0).cluster().topologyVersion())), sz, sz);

    startGrid(sz);

    assertBaseline(content(null, GridRestCommand.BASELINE_CURRENT_STATE), sz, sz + 1);

    // Set the baseline explicitly via consistentId1..N parameters.
    ArrayList<String> params = new ArrayList<>();

    int i = 1;

    for (BaselineNode n : grid(0).cluster().nodes()) {
        params.add("consistentId" + i++);
        params.add(String.valueOf(n.consistentId()));
    }

    assertBaseline(content(null, GridRestCommand.BASELINE_SET, params.toArray(new String[0])), sz + 1, sz + 1);

    stopGrid(sz);
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    // Start from a clean persistence state so the baseline is rebuilt.
    cleanPersistenceDir();

    startGrids(gridCount());

    // Optionally add a client node with a near cache on top of the servers.
    if (withClientNearCache()) {
        client = true;

        startGrid(gridCount());

        client = false;
    }

    assert gridCount() > 0;

    final IgniteClusterEx cluster = grid(0).cluster();

    if (log.isInfoEnabled())
        log.info("BTL before activation: " + cluster.currentBaselineTopology());

    // Activation establishes the baseline topology for persistent caches.
    cluster.active(true);

    if (log.isInfoEnabled())
        log.info("BTL after activation: " + cluster.currentBaselineTopology());

    awaitPartitionMapExchange();
}