case "haEnabled": if (member.getValue() instanceof Boolean) { obj.setHAEnabled((Boolean)member.getValue());
// Starts `nodes` clustered HA-enabled Vert.x instances, records each node index
// as alive, and registers a failover-complete handler per node so the test can
// track which node a failed deployment lands on.
protected void createNodes(int nodes) {
  startNodes(nodes, new VertxOptions().setHAEnabled(true));
  aliveNodes = new CopyOnWriteArrayList<>();
  for (int node = 0; node < nodes; node++) {
    aliveNodes.add(node);
    // Effectively-final copy for capture by the lambda below.
    int failoverTarget = node;
    ((VertxInternal) vertices[node])
        .failoverCompleteHandler((nodeID, haInfo, succeeded) -> failedOverOnto(failoverTarget));
  }
  // Raw Set[] is unavoidable here: Java forbids generic array creation.
  deploymentSnapshots = new Set[nodes];
}
String haGroup = args.map.get("-hagroup"); int quorumSize = args.getInt("-quorum"); options.setHAEnabled(true); if (haGroup != null) { options.setHAGroup(haGroup);
/**
 * Starts a single clustered Vert.x instance for the test.
 *
 * @param haGroup    HA group name, or {@code null} for the default group (only used when {@code ha} is true)
 * @param quorumSize quorum size to configure (only used when {@code ha} is true)
 * @param ha         whether to enable high availability on the instance
 * @return the started Vert.x instance
 * @throws IllegalStateException if the instance does not start within 2 minutes
 */
protected Vertx startVertx(String haGroup, int quorumSize, boolean ha) throws Exception {
  VertxOptions options = new VertxOptions()
      .setHAEnabled(ha)
      .setClustered(true)
      .setClusterHost("localhost")
      .setClusterManager(getClusterManager());
  if (ha) {
    options.setQuorumSize(quorumSize);
    if (haGroup != null) {
      options.setHAGroup(haGroup);
    }
  }
  CountDownLatch latch = new CountDownLatch(1);
  AtomicReference<Vertx> vertxRef = new AtomicReference<>();
  clusteredVertx(options, onSuccess(vertx -> {
    vertxRef.set(vertx);
    latch.countDown();
  }));
  // Bug fix: the original ignored await()'s boolean result, so a startup
  // timeout silently returned null and surfaced as a confusing NPE later.
  if (!latch.await(2, TimeUnit.MINUTES)) {
    throw new IllegalStateException("Timed out waiting for clustered Vert.x instance to start");
  }
  return vertxRef.get();
}
// Verifies basic HA failover: an HA deployment made on node 0 must migrate to
// node 1 after node 0 is killed, preserving the deployment options (including
// the JSON config). The latch ensures the deployment completes before the kill.
@Test public void testSimpleFailover() throws Exception { startNodes(2, new VertxOptions().setHAEnabled(true)); DeploymentOptions options = new DeploymentOptions().setHa(true); JsonObject config = new JsonObject().put("foo", "bar"); options.setConfig(config); CountDownLatch latch = new CountDownLatch(1); vertices[0].deployVerticle("java:" + HAVerticle1.class.getName(), options, ar -> { assertTrue(ar.succeeded()); assertEquals(1, vertices[0].deploymentIDs().size()); assertEquals(0, vertices[1].deploymentIDs().size()); latch.countDown(); }); awaitLatch(latch); kill(0); assertWaitUntil(() -> vertices[1].deploymentIDs().size() == 1); checkDeploymentExists(1, "java:" + HAVerticle1.class.getName(), options); }
options.setHAEnabled(true); if (haGroup != null) { options.setHAGroup(haGroup);
case "haEnabled": if (member.getValue() instanceof Boolean) { obj.setHAEnabled((Boolean)member.getValue());
// Starts `nodes` clustered HA-enabled Vert.x instances, marks each node index
// alive, and hooks a failover-complete handler per node so tests can observe
// which node a failed deployment was moved onto. `pos` is an effectively-final
// copy of the loop index for capture by the lambda.
protected void createNodes(int nodes) { startNodes(nodes, new VertxOptions().setHAEnabled(true)); aliveNodes = new CopyOnWriteArrayList<>(); for (int i = 0; i < nodes; i++) { aliveNodes.add(i); int pos = i; ((VertxInternal)vertices[i]).failoverCompleteHandler((nodeID, haInfo, succeeded) -> { failedOverOnto(pos); }); } deploymentSnapshots = new Set[nodes]; }
String haGroup = args.map.get("-hagroup"); int quorumSize = args.getInt("-quorum"); options.setHAEnabled(true); if (haGroup != null) { options.setHAGroup(haGroup);
options.setMaxEventLoopExecuteTime(maxEventLoopExecuteTime); options.setMaxWorkerExecuteTime(maxWorkerExecuteTime); options.setHAEnabled(haEnabled); options.setFileResolverCachingEnabled(fileResolverCachingEnabled); options.setQuorumSize(quorumSize);
/**
 * Starts a single clustered Vert.x instance for the test.
 *
 * @param haGroup    HA group name, or {@code null} for the default group (only used when {@code ha} is true)
 * @param quorumSize quorum size to configure (only used when {@code ha} is true)
 * @param ha         whether to enable high availability on the instance
 * @return the started Vert.x instance
 * @throws IllegalStateException if the instance does not start within 2 minutes
 */
protected Vertx startVertx(String haGroup, int quorumSize, boolean ha) throws Exception {
  VertxOptions options = new VertxOptions()
      .setHAEnabled(ha)
      .setClustered(true)
      .setClusterHost("localhost")
      .setClusterManager(getClusterManager());
  if (ha) {
    options.setQuorumSize(quorumSize);
    if (haGroup != null) {
      options.setHAGroup(haGroup);
    }
  }
  CountDownLatch latch = new CountDownLatch(1);
  AtomicReference<Vertx> vertxRef = new AtomicReference<>();
  clusteredVertx(options, onSuccess(vertx -> {
    vertxRef.set(vertx);
    latch.countDown();
  }));
  // Bug fix: the original ignored await()'s boolean result, so a startup
  // timeout silently returned null and surfaced as a confusing NPE later.
  if (!latch.await(2, TimeUnit.MINUTES)) {
    throw new IllegalStateException("Timed out waiting for clustered Vert.x instance to start");
  }
  return vertxRef.get();
}
// Verifies basic HA failover: an HA deployment made on node 0 must migrate to
// node 1 after node 0 is killed, preserving the deployment options (including
// the JSON config). The latch ensures the deployment completes before the kill.
@Test public void testSimpleFailover() throws Exception { startNodes(2, new VertxOptions().setHAEnabled(true)); DeploymentOptions options = new DeploymentOptions().setHa(true); JsonObject config = new JsonObject().put("foo", "bar"); options.setConfig(config); CountDownLatch latch = new CountDownLatch(1); vertices[0].deployVerticle("java:" + HAVerticle1.class.getName(), options, ar -> { assertTrue(ar.succeeded()); assertEquals(1, vertices[0].deploymentIDs().size()); assertEquals(0, vertices[1].deploymentIDs().size()); latch.countDown(); }); awaitLatch(latch); kill(0); assertWaitUntil(() -> vertices[1].deploymentIDs().size() == 1); checkDeploymentExists(1, "java:" + HAVerticle1.class.getName(), options); }
options.setHAEnabled(true); if (haGroup != null) { options.setHAGroup(haGroup);
assertSame(mgr, options.getClusterManager()); assertFalse(options.isHAEnabled()); assertEquals(options, options.setHAEnabled(true)); assertTrue(options.isHAEnabled()); rand = TestUtils.randomPositiveInt();
options.setMaxEventLoopExecuteTime(maxEventLoopExecuteTime); options.setMaxWorkerExecuteTime(maxWorkerExecuteTime); options.setHAEnabled(haEnabled); options.setFileResolverCachingEnabled(fileResolverCachingEnabled); options.setQuorumSize(quorumSize);
/**
 * Builds the default {@link VertxOptions} bean from the externally configured
 * properties. Only registered when no other {@code VertxOptions} bean exists.
 *
 * @param eventBusOptions event-bus configuration to attach to the options
 * @param metricsOptions  metrics configuration to attach to the options
 * @return the fully populated options object
 */
@ConditionalOnMissingBean(VertxOptions.class)
@Bean
public VertxOptions vertxOptions(EventBusOptions eventBusOptions, MetricsOptions metricsOptions) {
  VertxOptions options = new VertxOptions();
  options.setBlockedThreadCheckInterval(properties.getBlockedThreadCheckInterval());
  options.setEventLoopPoolSize(properties.getEventLoopPoolSize());
  options.setWorkerPoolSize(properties.getWorkerPoolSize());
  options.setInternalBlockingPoolSize(properties.getInternalBlockingPoolSize());
  options.setQuorumSize(properties.getQuorumSize());
  options.setMaxEventLoopExecuteTime(properties.getMaxEventLoopExecuteTime());
  options.setHAGroup(properties.getHaGroup());
  options.setMaxWorkerExecuteTime(properties.getMaxWorkerExecuteTime());
  options.setWarningExceptionTime(properties.getWarningExceptionTime());
  options.setFileResolverCachingEnabled(properties.isFileResolverCachingEnabled());
  options.setHAEnabled(properties.isHaEnabled());
  options.setEventBusOptions(eventBusOptions);
  options.setMetricsOptions(metricsOptions);
  return options;
}
assertSame(mgr, options.getClusterManager()); assertFalse(options.isHAEnabled()); assertEquals(options, options.setHAEnabled(true)); assertTrue(options.isHAEnabled()); rand = TestUtils.randomPositiveInt();