// Undeploys every HA-flagged deployment on this node and queues each one for
// automatic redeployment once quorum is re-attained. Called on loss of quorum.
private void undeployHADeployments() {
  for (String deploymentID: deploymentManager.deployments()) {
    Deployment dep = deploymentManager.getDeployment(deploymentID);
    if (dep != null) {
      if (dep.deploymentOptions().isHa()) {
        ContextInternal ctx = vertx.getContext();
        try {
          // Clear the thread-local context before undeploying so the undeploy
          // is not attributed to the current verticle's context; the original
          // context is restored in the finally block below.
          ContextImpl.setContext(null);
          deploymentManager.undeployVerticle(deploymentID, result -> {
            if (result.succeeded()) {
              log.info("Successfully undeployed HA deployment " + deploymentID + "-" + dep.verticleIdentifier() + " as there is no quorum");
              // Re-register the verticle so it is redeployed when quorum returns.
              addToHADeployList(dep.verticleIdentifier(), dep.deploymentOptions(), result1 -> {
                if (result1.succeeded()) {
                  log.info("Successfully redeployed verticle " + dep.verticleIdentifier() + " after quorum was re-attained");
                } else {
                  log.error("Failed to redeploy verticle " + dep.verticleIdentifier() + " after quorum was re-attained", result1.cause());
                }
              });
            } else {
              log.error("Failed to undeploy deployment on lost quorum", result.cause());
            }
          });
        } finally {
          // Always restore the caller's context, even if undeployVerticle throws
          ContextImpl.setContext((ContextImpl) ctx);
        }
      }
    }
  }
}
// Verifies that a verticle deployed without an explicit instances option
// reports an instance count of 1 from its context.
@Test
public void testGetInstanceCount() throws Exception {
  class MultiInstanceVerticle extends AbstractVerticle {
    @Override
    public void start() {
      // Fixed: JUnit convention is assertEquals(expected, actual); the
      // arguments were swapped, which produces a misleading failure message.
      assertEquals(1, vertx.getOrCreateContext().getInstanceCount());
    }
  }
  vertx.deployVerticle(new MultiInstanceVerticle(), ar -> {
    assertTrue(ar.succeeded());
    testComplete();
  });
  await();
  // Clean up the deployment so subsequent tests start from a clean slate
  Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
  vertx.undeploy(deployment.deploymentID());
}
private synchronized void doUndeployChildren(ContextInternal undeployingContext, Handler<AsyncResult<Void>> completionHandler) { if (!children.isEmpty()) { final int size = children.size(); AtomicInteger childCount = new AtomicInteger(); boolean undeployedSome = false; for (Deployment childDeployment: new HashSet<>(children)) { undeployedSome = true; childDeployment.doUndeploy(undeployingContext, ar -> { children.remove(childDeployment); if (ar.failed()) { reportFailure(ar.cause(), undeployingContext, completionHandler); } else if (childCount.incrementAndGet() == size) { // All children undeployed completionHandler.handle(Future.succeededFuture()); } }); } if (!undeployedSome) { // It's possible that children became empty before iterating completionHandler.handle(Future.succeededFuture()); } } else { completionHandler.handle(Future.succeededFuture()); } }
public int getInstanceCount() { // the no verticle case if (deployment == null) { return 0; } // the single verticle without an instance flag explicitly defined if (deployment.deploymentOptions() == null) { return 1; } return deployment.deploymentOptions().getInstances(); } }
@Test public void testNonHADeployments() throws Exception { vertx1 = startVertx(); vertx2 = startVertx(); // Deploy an HA and a non HA deployment CountDownLatch latch1 = new CountDownLatch(2); vertx2.deployVerticle("java:" + HAVerticle1.class.getName(), new DeploymentOptions().setHa(true), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx2.deploymentIDs().contains(ar.result())); latch1.countDown(); }); vertx2.deployVerticle("java:" + HAVerticle2.class.getName(), new DeploymentOptions().setHa(false), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx2.deploymentIDs().contains(ar.result())); latch1.countDown(); }); awaitLatch(latch1); CountDownLatch latch2 = new CountDownLatch(1); ((VertxInternal)vertx1).failoverCompleteHandler((nodeID, haInfo, succeeded) -> { assertTrue(succeeded); latch2.countDown(); }); ((VertxInternal)vertx2).simulateKill(); awaitLatch(latch2); assertTrue(vertx1.deploymentIDs().size() == 1); String depID = vertx1.deploymentIDs().iterator().next(); assertTrue(((VertxInternal) vertx1).getDeployment(depID).verticleIdentifier().equals("java:" + HAVerticle1.class.getName())); }
// NOTE(review): fragment of a larger test method — its definition starts and
// ends outside this chunk, so only comments are added here.
assertEquals(1, vertx.deploymentIDs().size());
Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
// All instances belong to the single deployment
Set<Verticle> verticles = deployment.getVerticles();
assertEquals(numInstances, verticles.size());
CountDownLatch undeployLatch = new CountDownLatch(1);
assertEquals(numInstances, deployCount.get());
// A single undeploy of the whole deployment must invoke the handler exactly once
vertx.undeploy(deployment.deploymentID(), onSuccess(v -> {
  assertEquals(1, undeployHandlerCount.incrementAndGet());
  undeployLatch.countDown();
public void undeployAll(Handler<AsyncResult<Void>> completionHandler) { // TODO timeout if it takes too long - e.g. async stop verticle fails to call future // We only deploy the top level verticles as the children will be undeployed when the parent is Set<String> deploymentIDs = new HashSet<>(); for (Map.Entry<String, Deployment> entry: deployments.entrySet()) { if (!entry.getValue().isChild()) { deploymentIDs.add(entry.getKey()); } } if (!deploymentIDs.isEmpty()) { AtomicInteger count = new AtomicInteger(0); for (String deploymentID : deploymentIDs) { undeployVerticle(deploymentID, ar -> { if (ar.failed()) { // Log but carry on regardless log.error("Undeploy failed", ar.cause()); } if (count.incrementAndGet() == deploymentIDs.size()) { completionHandler.handle(Future.succeededFuture()); } }); } } else { Context context = vertx.getOrCreateContext(); context.runOnContext(v -> completionHandler.handle(Future.succeededFuture())); } }
// NOTE(review): fragment — the enclosing handler/method is not visible in
// this chunk, so only comments are added.
if (ar.succeeded()) {
  if (parent != null) {
    // Link the new deployment to its parent so it is undeployed with it;
    // presumably addChild returns false when the parent is already being
    // undeployed — TODO confirm against Deployment.addChild.
    if (parent.addChild(deployment)) {
      deployment.child = true;
    } else {
// Removes the given deployment from this node's HA metadata in the cluster
// map so it no longer participates in failover. No-op when the deployment is
// unknown or was not deployed with the HA option.
public void removeFromHA(String depID) {
  Deployment dep = deploymentManager.getDeployment(depID);
  if (dep == null || !dep.deploymentOptions().isHa()) {
    return;
  }
  synchronized (haInfo) {
    JsonArray haMods = haInfo.getJsonArray("verticles");
    // Remove every entry recorded under this deployment id; JsonArray has no
    // removeIf, so use the iterator's remove()
    Iterator<Object> it = haMods.iterator();
    while (it.hasNext()) {
      JsonObject entry = (JsonObject) it.next();
      if (entry.getString("dep_id").equals(depID)) {
        it.remove();
      }
    }
    // Publish the updated HA info for this node to the cluster
    clusterMap.put(nodeID, haInfo.encode());
  }
}
@Test public void testCloseRemovesFromCluster() throws Exception { vertx1 = startVertx(); vertx2 = startVertx(); vertx3 = startVertx(); CountDownLatch latch1 = new CountDownLatch(1); vertx3.deployVerticle("java:" + HAVerticle1.class.getName(), new DeploymentOptions().setHa(true), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx3.deploymentIDs().contains(ar.result())); latch1.countDown(); }); awaitLatch(latch1); CountDownLatch latch2 = new CountDownLatch(1); // Close vertx2 - this should not then participate in failover vertx2.close(ar -> { ((VertxInternal) vertx1).failoverCompleteHandler((nodeID, haInfo, succeeded) -> { assertTrue(succeeded); latch2.countDown(); }); ((VertxInternal) vertx3).simulateKill(); }); awaitLatch(latch2); assertTrue(vertx1.deploymentIDs().size() == 1); String depID = vertx1.deploymentIDs().iterator().next(); assertTrue(((VertxInternal) vertx1).getDeployment(depID).verticleIdentifier().equals("java:" + HAVerticle1.class.getName())); }
// NOTE(review): fragment of a larger test method — its definition starts and
// ends outside this chunk, so only comments are added here.
assertEquals(1, vertx.deploymentIDs().size());
Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
// All instances belong to the single deployment
Set<Verticle> verticles = deployment.getVerticles();
assertEquals(numInstances, verticles.size());
CountDownLatch undeployLatch = new CountDownLatch(1);
assertEquals(numInstances, deployCount.get());
// A single undeploy of the whole deployment must invoke the handler exactly once
vertx.undeploy(deployment.deploymentID(), onSuccess(v -> {
  assertEquals(1, undeployHandlerCount.incrementAndGet());
  undeployLatch.countDown();
public void undeployAll(Handler<AsyncResult<Void>> completionHandler) { // TODO timeout if it takes too long - e.g. async stop verticle fails to call future // We only deploy the top level verticles as the children will be undeployed when the parent is Set<String> deploymentIDs = new HashSet<>(); for (Map.Entry<String, Deployment> entry: deployments.entrySet()) { if (!entry.getValue().isChild()) { deploymentIDs.add(entry.getKey()); } } if (!deploymentIDs.isEmpty()) { AtomicInteger count = new AtomicInteger(0); for (String deploymentID : deploymentIDs) { undeployVerticle(deploymentID, ar -> { if (ar.failed()) { // Log but carry on regardless log.error("Undeploy failed", ar.cause()); } if (count.incrementAndGet() == deploymentIDs.size()) { completionHandler.handle(Future.succeededFuture()); } }); } } else { Context context = vertx.getOrCreateContext(); context.runOnContext(v -> completionHandler.handle(Future.succeededFuture())); } }
// NOTE(review): fragment — the enclosing handler/method is not visible in
// this chunk, so only comments are added.
if (ar.succeeded()) {
  if (parent != null) {
    // Link the new deployment to its parent so it is undeployed with it;
    // presumably addChild returns false when the parent is already being
    // undeployed — TODO confirm against Deployment.addChild.
    if (parent.addChild(deployment)) {
      deployment.child = true;
    } else {
// Asserts that every deployment recorded in the snapshot at prevPos is still
// present (matched by verticle identifier and deployment options) in the
// current snapshot for pos, then returns the current snapshot's size.
protected int checkHasDeployments(int pos, int prevPos) {
  Set<Deployment> prevSet = deploymentSnapshots[prevPos];
  Set<Deployment> currSet = takeDeploymentSnapshot(pos);
  for (Deployment prev : prevSet) {
    boolean found = currSet.stream().anyMatch(curr ->
        curr.verticleIdentifier().equals(prev.verticleIdentifier())
            && curr.deploymentOptions().equals(prev.deploymentOptions()));
    assertTrue(found);
  }
  return currSet.size();
}
// NOTE(review): fragment — the start of the enclosing test is not visible in
// this chunk, so only comments are added.
Deployment dep = ((VertxInternal)vertx).getDeployment(res.result());
assertNotNull(dep);
// The effective options must differ from the originals supplied earlier in
// the (unseen) test setup: config replaced, worker + isolation group applied
assertFalse(original.equals(dep.deploymentOptions()));
assertFalse(dep.deploymentOptions().getConfig().containsKey("foo"));
assertEquals("quux", dep.deploymentOptions().getConfig().getString("wibble"));
assertTrue(dep.deploymentOptions().isWorker());
assertEquals("othergroup", dep.deploymentOptions().getIsolationGroup());
testComplete();
});
@Test public void testNonHADeployments() throws Exception { vertx1 = startVertx(); vertx2 = startVertx(); // Deploy an HA and a non HA deployment CountDownLatch latch1 = new CountDownLatch(2); vertx2.deployVerticle("java:" + HAVerticle1.class.getName(), new DeploymentOptions().setHa(true), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx2.deploymentIDs().contains(ar.result())); latch1.countDown(); }); vertx2.deployVerticle("java:" + HAVerticle2.class.getName(), new DeploymentOptions().setHa(false), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx2.deploymentIDs().contains(ar.result())); latch1.countDown(); }); awaitLatch(latch1); CountDownLatch latch2 = new CountDownLatch(1); ((VertxInternal)vertx1).failoverCompleteHandler((nodeID, haInfo, succeeded) -> { assertTrue(succeeded); latch2.countDown(); }); ((VertxInternal)vertx2).simulateKill(); awaitLatch(latch2); assertTrue(vertx1.deploymentIDs().size() == 1); String depID = vertx1.deploymentIDs().iterator().next(); assertTrue(((VertxInternal) vertx1).getDeployment(depID).verticleIdentifier().equals("java:" + HAVerticle1.class.getName())); }
// Deploys a verticle with 3 instances and checks the instance counts reported
// over the event bus: 3 messages are expected, totalling 9.
@Test
public void testGetInstanceCountMultipleVerticles() throws Exception {
  AtomicInteger messageCount = new AtomicInteger(0);
  AtomicInteger totalReportedInstances = new AtomicInteger(0);
  vertx.eventBus().consumer("instanceCount", event -> {
    messageCount.incrementAndGet();
    totalReportedInstances.addAndGet((int) event.body());
    if (messageCount.get() == 3) {
      assertEquals(9, totalReportedInstances.get());
      testComplete();
    }
  });
  vertx.deployVerticle(TestVerticle3.class.getCanonicalName(), new DeploymentOptions().setInstances(3), ar -> assertTrue(ar.succeeded()));
  await();
  // Clean up: undeploy and wait so the next test starts from a clean slate
  Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
  CountDownLatch latch = new CountDownLatch(1);
  vertx.undeploy(deployment.deploymentID(), ar -> latch.countDown());
  awaitLatch(latch);
}
private synchronized void doUndeployChildren(ContextInternal undeployingContext, Handler<AsyncResult<Void>> completionHandler) { if (!children.isEmpty()) { final int size = children.size(); AtomicInteger childCount = new AtomicInteger(); boolean undeployedSome = false; for (Deployment childDeployment: new HashSet<>(children)) { undeployedSome = true; childDeployment.doUndeploy(undeployingContext, ar -> { children.remove(childDeployment); if (ar.failed()) { reportFailure(ar.cause(), undeployingContext, completionHandler); } else if (childCount.incrementAndGet() == size) { // All children undeployed completionHandler.handle(Future.succeededFuture()); } }); } if (!undeployedSome) { // It's possible that children became empty before iterating completionHandler.handle(Future.succeededFuture()); } } else { completionHandler.handle(Future.succeededFuture()); } }
// Fails the test unless node `pos` has a deployment whose verticle name and
// deployment options both match the given values.
protected void checkDeploymentExists(int pos, String verticleName, DeploymentOptions options) {
  VertxInternal vi = (VertxInternal) vertices[pos];
  boolean found = vi.deploymentIDs().stream()
      .map(vi::getDeployment)
      .anyMatch(dep -> verticleName.equals(dep.verticleIdentifier()) && options.equals(dep.deploymentOptions()));
  if (!found) {
    fail("Can't find deployment for verticleName: " + verticleName + " on node " + pos);
  }
}
public int getInstanceCount() { // the no verticle case if (deployment == null) { return 0; } // the single verticle without an instance flag explicitly defined if (deployment.deploymentOptions() == null) { return 1; } return deployment.deploymentOptions().getInstances(); } }