// Creates a clustered Vert.x instance backed by the injected Hazelcast instance
// as soon as the bean is constructed, and stores it in the `vertx` field.
// NOTE(review): future.get() has no timeout — a hanging cluster join would block
// container startup indefinitely; consider get(timeout, unit). TODO confirm desired policy.
@PostConstruct
void init() throws ExecutionException, InterruptedException {
    VertxOptions options = new VertxOptions()
        .setClusterManager(new HazelcastClusterManager(hazelcastInstance));
    // Bridge the callback-style clusteredVertx API to a CompletableFuture so the
    // init thread can wait synchronously for the cluster to come up.
    CompletableFuture<Vertx> future = new CompletableFuture<>();
    Vertx.clusteredVertx(options, ar -> {
        if (ar.succeeded()) {
            future.complete(ar.result());
        } else {
            future.completeExceptionally(ar.cause());
        }
    });
    vertx = future.get(); // blocks until clustering succeeds or fails
}
/**
 * Specifies Vert.x instance configuration, this is essential for clustering on multiple
 * separate machines and Docker containers in order to make Event Bus send/consume
 * messages appropriately.
 *
 * @param clusterManager represents the cluster manager
 * @return VertxOptions object to be used in deployment
 */
private static VertxOptions configureVertx(ClusterManager clusterManager) {
    /* The default cluster host (localhost) is used here. To cluster multiple
       machines/Docker containers, each node must advertise its own IP via
       setClusterHost so the event bus can send/consume messages between our
       verticles, e.g. options.setClusterHost("192.168.1.12"); */
    return new VertxOptions()
        .setClustered(true)
        .setClusterManager(clusterManager);
}
}
/**
 * Specifies Vert.x instance configuration, this is essential for clustering on multiple
 * separate machines and Docker containers in order to make Event Bus send/consume
 * messages appropriately.
 *
 * @param clusterManager represents the cluster manager
 * @return VertxOptions object to be used in deployment
 */
private static VertxOptions configureVertx(ClusterManager clusterManager) {
    /* The default cluster host (localhost) is used here. To cluster multiple
       machines/Docker containers, each node must advertise its own IP via
       setClusterHost so the event bus can send/consume messages between our
       verticles, e.g. options.setClusterHost("192.168.1.12"); */
    return new VertxOptions()
        .setClustered(true)
        .setClusterManager(clusterManager);
}
}
@Test public void testCallbackInvokedOnFailure() throws Exception { // will trigger java.net.UnknownHostException String hostName = "zoom.zoom.zen.tld"; VertxOptions options = new VertxOptions() .setClusterManager(new FakeClusterManager()) .setClusterHost(hostName); AtomicReference<AsyncResult<Vertx>> resultRef = new AtomicReference<>(); CountDownLatch latch = new CountDownLatch(1); Vertx.clusteredVertx(options, ar -> { resultRef.set(ar); latch.countDown(); }); awaitLatch(latch); assertFalse(resultRef.get() == null); assertTrue(resultRef.get().failed()); assertTrue("Was expecting failure to be an instance of UnknownHostException", resultRef.get().cause() instanceof UnknownHostException); } }
/**
 * Starts {@code numNodes} clustered Vert.x nodes on localhost with an ephemeral
 * cluster port and stores them in {@code vertices}, blocking up to two minutes
 * for all nodes to come up.
 *
 * @param numNodes number of cluster nodes to start
 * @param options  base options, mutated with host/port/cluster-manager settings
 */
protected void startNodes(int numNodes, VertxOptions options) {
    CountDownLatch latch = new CountDownLatch(numNodes);
    vertices = new Vertx[numNodes];
    for (int i = 0; i < numNodes; i++) {
        int index = i; // effectively-final copy for the lambda
        clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
            .setClusterManager(getClusterManager()), ar -> {
                try {
                    if (ar.failed()) {
                        ar.cause().printStackTrace();
                    }
                    assertTrue("Failed to start node", ar.succeeded());
                    vertices[index] = ar.result();
                } finally {
                    // Count down even on assertion failure so the caller doesn't hang.
                    latch.countDown();
                }
            });
    }
    try {
        assertTrue(latch.await(2, TimeUnit.MINUTES));
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
        fail(e.getMessage());
    }
}
/**
 * Starts a single clustered Vert.x node, optionally HA-enabled with the given
 * quorum size and HA group, and blocks up to two minutes for it to come up.
 *
 * @param haGroup    HA group name, or {@code null} to use the default group
 * @param quorumSize quorum size applied only when {@code ha} is true
 * @param ha         whether to enable high availability
 * @return the started Vert.x instance
 * @throws Exception if interrupted while waiting for startup
 */
protected Vertx startVertx(String haGroup, int quorumSize, boolean ha) throws Exception {
    VertxOptions options = new VertxOptions().setHAEnabled(ha).setClustered(true).
        setClusterHost("localhost").setClusterManager(getClusterManager());
    if (ha) {
        options.setQuorumSize(quorumSize);
        if (haGroup != null) {
            options.setHAGroup(haGroup);
        }
    }
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<Vertx> vertxRef = new AtomicReference<>();
    clusteredVertx(options, onSuccess(vertx -> {
        vertxRef.set(vertx);
        latch.countDown();
    }));
    // Fix: previously the await result was ignored, so a startup timeout
    // silently returned null and surfaced later as an NPE in the caller.
    assertTrue("Timed out waiting for clustered Vert.x to start", latch.await(2, TimeUnit.MINUTES));
    return vertxRef.get();
}
@Test
public void testCreateClusteredVertxAsyncDetectJoinFailure() {
    // A cluster manager whose join always fails should surface the failure
    // through the clusteredVertx result handler.
    VertxOptions options = new VertxOptions().setClusterManager(new FakeClusterManager() {
        @Override
        public void join(Handler<AsyncResult<Void>> resultHandler) {
            resultHandler.handle(Future.failedFuture(new Exception("joinfailure")));
        }
    });
    clusteredVertx(options, ar -> {
        assertTrue(ar.failed());
        assertEquals("joinfailure", ar.cause().getMessage());
        testComplete();
    });
    await();
}
}
@Test public void testCallbackInvokedOnFailure() throws Exception { // will trigger java.net.UnknownHostException String hostName = "zoom.zoom.zen.tld"; VertxOptions options = new VertxOptions() .setClusterManager(new FakeClusterManager()) .setClusterHost(hostName); AtomicReference<AsyncResult<Vertx>> resultRef = new AtomicReference<>(); CountDownLatch latch = new CountDownLatch(1); Vertx.clusteredVertx(options, ar -> { resultRef.set(ar); latch.countDown(); }); awaitLatch(latch); assertFalse(resultRef.get() == null); assertTrue(resultRef.get().failed()); assertTrue("Was expecting failure to be an instance of UnknownHostException", resultRef.get().cause() instanceof UnknownHostException); } }
/**
 * Starts {@code numNodes} clustered Vert.x nodes on localhost with an ephemeral
 * cluster port and stores them in {@code vertices}, blocking up to two minutes
 * for all nodes to come up.
 *
 * @param numNodes number of cluster nodes to start
 * @param options  base options, mutated with host/port/cluster-manager settings
 */
protected void startNodes(int numNodes, VertxOptions options) {
    CountDownLatch latch = new CountDownLatch(numNodes);
    vertices = new Vertx[numNodes];
    for (int i = 0; i < numNodes; i++) {
        int index = i; // effectively-final copy for the lambda
        clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
            .setClusterManager(getClusterManager()), ar -> {
                try {
                    if (ar.failed()) {
                        ar.cause().printStackTrace();
                    }
                    assertTrue("Failed to start node", ar.succeeded());
                    vertices[index] = ar.result();
                } finally {
                    // Count down even on assertion failure so the caller doesn't hang.
                    latch.countDown();
                }
            });
    }
    try {
        assertTrue(latch.await(2, TimeUnit.MINUTES));
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
        fail(e.getMessage());
    }
}
/**
 * Starts a single clustered Vert.x node, optionally HA-enabled with the given
 * quorum size and HA group, and blocks up to two minutes for it to come up.
 *
 * @param haGroup    HA group name, or {@code null} to use the default group
 * @param quorumSize quorum size applied only when {@code ha} is true
 * @param ha         whether to enable high availability
 * @return the started Vert.x instance
 * @throws Exception if interrupted while waiting for startup
 */
protected Vertx startVertx(String haGroup, int quorumSize, boolean ha) throws Exception {
    VertxOptions options = new VertxOptions().setHAEnabled(ha).setClustered(true).
        setClusterHost("localhost").setClusterManager(getClusterManager());
    if (ha) {
        options.setQuorumSize(quorumSize);
        if (haGroup != null) {
            options.setHAGroup(haGroup);
        }
    }
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<Vertx> vertxRef = new AtomicReference<>();
    clusteredVertx(options, onSuccess(vertx -> {
        vertxRef.set(vertx);
        latch.countDown();
    }));
    // Fix: previously the await result was ignored, so a startup timeout
    // silently returned null and surfaced later as an NPE in the caller.
    assertTrue("Timed out waiting for clustered Vert.x to start", latch.await(2, TimeUnit.MINUTES));
    return vertxRef.get();
}
@Test
public void testCreateClusteredVertxAsyncDetectJoinFailure() {
    // A cluster manager whose join always fails should surface the failure
    // through the clusteredVertx result handler.
    VertxOptions options = new VertxOptions().setClusterManager(new FakeClusterManager() {
        @Override
        public void join(Handler<AsyncResult<Void>> resultHandler) {
            resultHandler.handle(Future.failedFuture(new Exception("joinfailure")));
        }
    });
    clusteredVertx(options, ar -> {
        assertTrue(ar.failed());
        assertEquals("joinfailure", ar.cause().getMessage());
        testComplete();
    });
    await();
}
}
@Override
public void setUp() throws Exception {
    super.setUp();
    // Cluster-enabled options so the session store is shared across the started nodes.
    VertxOptions options = new VertxOptions()
        .setClustered(true)
        .setClusterManager(getClusterManager());
    startNodes(numNodes, options);
    store = ClusteredSessionStore.create(vertices[0], 3000);
}
// setClusterManager is a fluent setter: it returns the same options instance.
assertEquals(options, options.setClusterManager(mgr));
// The manager just set must be readable back, by identity.
assertSame(mgr, options.getClusterManager());
// Setting a cluster manager must not implicitly enable HA.
assertFalse(options.isHAEnabled());
public void example3(CuratorFramework curator) { ClusterManager mgr = new ZookeeperClusterManager(curator); VertxOptions options = new VertxOptions().setClusterManager(mgr); Vertx.clusteredVertx(options, res -> { if (res.succeeded()) { Vertx vertx = res.result(); } else { // failed! } }); } }
public void example1() { ClusterManager mgr = new ZookeeperClusterManager(); VertxOptions options = new VertxOptions().setClusterManager(mgr); Vertx.clusteredVertx(options, res -> { if (res.succeeded()) { Vertx vertx = res.result(); } else { // failed! } }); }
VertxOptions options = new VertxOptions();
Config config = new Config();
NetworkConfig networkConfig = config.getNetworkConfig();
// Restrict Hazelcast to the interface supplied on the command line so the
// node binds to a predictable address.
networkConfig.getInterfaces().setEnabled(true).addInterface(args[0]);
options.setClusterManager(new HazelcastClusterManager(config));
options.setClustered(true);
options.setClusterHost(args[0]);
Vertx.clusteredVertx(options, res -> {
    // Fix: res.result() is null when clustering fails, which previously
    // caused an NPE; check the outcome before deploying.
    if (res.succeeded()) {
        Vertx vertx = res.result();
        vertx.deployVerticle(new Receiver());
    } else {
        res.cause().printStackTrace();
    }
});
@Before
public void setUp() {
    // Random per-run auth token so concurrent test runs don't share Infinispan credentials.
    Random random = new Random();
    System.setProperty("vertx.infinispan.test.auth.token", new BigInteger(128, random).toString(32));
    VertxOptions options = new VertxOptions()
        .setClusterManager(new InfinispanClusterManager());
    Vertx.clusteredVertx(options, ar -> {
        // NOTE(review): ar.result() is null when clustering fails, so the
        // await() below would only end via its timeout — consider failing fast
        // on ar.failed(). TODO confirm intended behavior.
        vertx = ar.result();
    });
    // Poll until the async callback has delivered the instance.
    await().until(() -> vertx != null);
    discovery = new DiscoveryImpl(vertx, new ServiceDiscoveryOptions());
}
}
@Override
public void setUp() throws Exception {
    super.setUp();
    // Cluster-enabled options so the session store is shared across the started nodes.
    VertxOptions options = new VertxOptions()
        .setClustered(true)
        .setClusterManager(getClusterManager());
    startNodes(numNodes, options);
    store = ClusteredSessionStore.create(vertices[0], 3000);
}
/**
 * Verifies that a programmatically supplied Hazelcast config is retained by the
 * manager and that a clustered instance starts and closes cleanly with it.
 */
private void testProgrammatic(HazelcastClusterManager mgr, Config config) throws Exception {
    mgr.setConfig(config);
    assertEquals(config, mgr.getConfig());
    VertxOptions options = new VertxOptions().setClustered(true).setClusterManager(mgr);
    Vertx.clusteredVertx(options, res -> {
        assertTrue(res.succeeded());
        assertNotNull(mgr.getHazelcastInstance());
        // Close the started instance and finish the test once shutdown completes.
        res.result().close(res2 -> {
            assertTrue(res2.succeeded());
            testComplete();
        });
    });
    await();
}
// setClusterManager is a fluent setter: it returns the same options instance.
assertEquals(options, options.setClusterManager(mgr));
// The manager just set must be readable back, by identity.
assertSame(mgr, options.getClusterManager());
// Setting a cluster manager must not implicitly enable HA.
assertFalse(options.isHAEnabled());