/**
 * Blocks the calling thread until cluster A's partition service reports a safe
 * state, polling every 100 ms.
 *
 * @throws InterruptedException if the thread is interrupted while sleeping
 */
private static void waitUntilClusterSafe() throws InterruptedException {
    for (;;) {
        if (clusterA.getPartitionService().isClusterSafe()) {
            return;
        }
        Thread.sleep(100);
    }
}
/**
 * Blocks the calling thread until cluster B's partition service reports a safe
 * state, polling every 100 ms.
 *
 * @throws InterruptedException if the thread is interrupted while sleeping
 */
private static void waitUntilClusterSafe() throws InterruptedException {
    boolean safe = clusterB.getPartitionService().isClusterSafe();
    while (!safe) {
        Thread.sleep(100);
        safe = clusterB.getPartitionService().isClusterSafe();
    }
}
/**
 * Boots a single Hazelcast member and prints whether the cluster currently
 * reports a safe state for shutdown.
 */
public static void main(String[] args) {
    HazelcastInstance member = Hazelcast.newHazelcastInstance();
    boolean clusterSafe = member.getPartitionService().isClusterSafe();
    // NOTE(review): "safe" per Hazelcast means no partition migrations / backups
    // pending — confirm against the PartitionService documentation.
    System.out.printf("# Is cluster safe for shutdown\t: %s\n", clusterSafe);
}
}
/** Polls (100 ms interval) until cluster A's partition service reports a safe state. */
private void waitUntilClusterSafe() {
    boolean safe = clusterA.getPartitionService().isClusterSafe();
    while (!safe) {
        sleepMillis(100);
        safe = clusterA.getPartitionService().isClusterSafe();
    }
}
/** Polls (100 ms interval) until cluster B's partition service reports a safe state. */
private static void waitUntilClusterSafe() {
    for (;;) {
        if (clusterB.getPartitionService().isClusterSafe()) {
            return;
        }
        sleepMillis(100);
    }
}
/** Polls (100 ms interval) until cluster B's partition service reports a safe state. */
private void waitUntilClusterSafe() {
    for (;;) {
        if (clusterB.getPartitionService().isClusterSafe()) {
            return;
        }
        sleepMillis(100);
    }
}
/** Polls (100 ms interval) until cluster A's partition service reports a safe state. */
private static void waitUntilClusterSafe() {
    boolean safe = clusterA.getPartitionService().isClusterSafe();
    while (!safe) {
        sleepMillis(100);
        safe = clusterA.getPartitionService().isClusterSafe();
    }
}
/**
 * Demo: starts three members from hazelcast.xml, loads 30,000 random-keyed
 * entries into the "example" map, then loops forever printing the cluster's
 * safety flag and the map size every 500 ms.
 */
public static void main(String[] args) {
    Config config = new ClasspathXmlConfig("hazelcast.xml");
    Hazelcast.newHazelcastInstance(config);
    Hazelcast.newHazelcastInstance(config);
    HazelcastInstance member = Hazelcast.newHazelcastInstance(config);
    Map<Double, Integer> map = member.getMap("example");
    for (int count = 0; count < 30000; count++) {
        if (count % 1000 == 0) {
            System.out.println("Adding " + count + "th element to map");
        }
        // Math.random() keys: collisions are possible but astronomically unlikely.
        map.put(Math.random(), count);
    }
    // Monitor loop — runs until the process is killed.
    while (true) {
        sleepMillis(500);
        System.out.println(member.getPartitionService().isClusterSafe());
        System.out.println(member.getMap("example").size());
    }
}
}
// Pure delegation to the member's PartitionService.
// NOTE(review): "safe" per Hazelcast means no partition migrations are in
// progress and backups are in sync — confirm against PartitionService docs.
@ManagedAnnotation("isClusterSafe")
@ManagedDescription("Is the cluster in a safe state")
public boolean isClusterSafe() {
    return hazelcastInstance.getPartitionService().isClusterSafe();
}
// Pure delegation to the member's PartitionService.
// NOTE(review): "safe" per Hazelcast means no partition migrations are in
// progress and backups are in sync — confirm against PartitionService docs.
@ManagedAnnotation("isClusterSafe")
@ManagedDescription("Is the cluster in a safe state")
public boolean isClusterSafe() {
    return hazelcastInstance.getPartitionService().isClusterSafe();
}
/**
 * Test-preparation hook: on member nodes, polls isClusterSafe() once per
 * second for up to isClusterSafeRetries attempts, then logs the final
 * cluster/member safety flags, member count, and partition statistics.
 * Does nothing on non-member (client) nodes.
 */
@Prepare
public void prepare() {
    if (!isMemberNode(targetInstance)) {
        return;
    }
    // Same evaluation order as before: safety is checked first, then the attempt budget.
    for (int attempt = 0; !partitionService.isClusterSafe() && attempt < isClusterSafeRetries; attempt++) {
        logger.info(name + ": isClusterSafe() " + partitionService.isClusterSafe());
        sleepSeconds(1);
    }
    logger.info(name + ": isClusterSafe() " + partitionService.isClusterSafe());
    logger.info(name + ": isLocalMemberSafe() " + partitionService.isLocalMemberSafe());
    logger.info(name + ": getCluster().getMembers().size() " + targetInstance.getCluster().getMembers().size());
    logPartitionStatistics(logger, name, map, false);
}
// NOTE(review): fragment cut mid-statement — the schema is only populated once
// the cluster reports a safe partition state; the try's handling of
// populateSchema(hz) continues beyond this view.
} else { if (hz.getPartitionService().isClusterSafe()) { try { result = populateSchema(hz);
public static void closeClustered(List<Vertx> clustered) throws Exception { for (Vertx vertx : clustered) { VertxInternal vertxInternal = (VertxInternal) vertx; HazelcastClusterManager clusterManager = (HazelcastClusterManager) vertxInternal.getClusterManager(); HazelcastInstance hazelcastInstance = clusterManager.getHazelcastInstance(); SECONDS.sleep(2); // Make sure rebalancing has been triggered long start = System.currentTimeMillis(); try { while (!hazelcastInstance.getPartitionService().isClusterSafe() && System.currentTimeMillis() - start < MILLISECONDS.convert(2, MINUTES)) { MILLISECONDS.sleep(100); } } catch (Exception ignore) { } CountDownLatch latch = new CountDownLatch(1); vertxInternal.close(ar -> { if (ar.failed()) { log.error("Failed to shutdown vert.x", ar.cause()); } latch.countDown(); }); latch.await(2, TimeUnit.MINUTES); } }
// NOTE(review): fragment cut mid-try — polls until the cluster reports a safe
// (steady) partition state before proceeding; the sleep/catch continues beyond
// this view.
while (!hi.getPartitionService().isClusterSafe()) { log.info("Waiting for cluster to be in a steady state"); try {