/**
 * Closes the server, leaving the cluster and killing the local server state.
 * <p>
 * The returned future is memoized: repeated calls return the same future. If
 * the server was never opened, a completed future is returned immediately.
 *
 * @return a future to be completed once the server has been closed
 */
@Override public CompletableFuture<Void> close() {
  // Never opened (or already fully closed): nothing to tear down.
  if (!open) return CompletableFuture.completedFuture(null);
  if (closeFuture == null) {
    // Double-checked locking so only one close chain is ever created.
    // NOTE(review): this pattern requires closeFuture to be a volatile field — confirm its declaration.
    synchronized (this) {
      if (closeFuture == null) {
        if (openFuture == null) {
          // Not concurrently opening: leave the cluster, then kill the local server.
          closeFuture = cluster().leave().thenCompose(v -> kill());
        } else {
          // An open is in flight: wait for it to finish before leaving and killing,
          // so we never tear down a half-opened server.
          closeFuture = openFuture.thenCompose(c -> cluster().leave().thenCompose(v -> kill()));
        }
      }
    }
  }
  return closeFuture;
}
/**
 * Rebalances member roles across the cluster.
 * <p>
 * Balancing runs only on the elected leader, only when a distributed lock is
 * configured, and only when no balancing pass is already in flight. The lock
 * is always released once the pass completes, successfully or not.
 */
private void balance() {
  // No lock configured, or a pass is already running: skip.
  if (lock == null || locking) {
    return;
  }
  // Only the current leader may rebalance the cluster.
  if (!server.server().cluster().member().equals(server.server().cluster().leader())) {
    return;
  }
  locking = true;
  lock.lock()
      .thenCompose(v -> balancer.balance(server.server().cluster()))
      .whenComplete((balanceResult, balanceError) ->
          lock.unlock().whenComplete((unlockResult, unlockError) -> locking = false));
}
closeFuture = new CompletableFuture<>(); if (openFuture == null) { cluster().leave().whenComplete((leaveResult, leaveError) -> { shutdown().whenComplete((shutdownResult, shutdownError) -> { context.delete(); openFuture.whenComplete((openResult, openError) -> { if (openError == null) { cluster().leave().whenComplete((leaveResult, leaveError) -> { shutdown().whenComplete((shutdownResult, shutdownError) -> { context.delete();
/** * Starts listening the server. */ private CompletableFuture<Void> listen() { CompletableFuture<Void> future = new CompletableFuture<>(); context.getThreadContext().executor().execute(() -> { internalServer.listen(cluster().member().serverAddress(), context::connectServer).whenComplete((internalResult, internalError) -> { if (internalError == null) { // If the client address is different than the server address, start a separate client server. if (clientServer != null) { clientServer.listen(cluster().member().clientAddress(), context::connectClient).whenComplete((clientResult, clientError) -> { started = true; future.complete(null); }); } else { started = true; future.complete(null); } } else { future.completeExceptionally(internalError); } }); }); return future; }
/**
 * Bootstraps the cluster using the provided cluster configuration.
 * <p>
 * Bootstrapping forms a new cluster from the given configuration; the initial nodes of a cluster must
 * always be bootstrapped to prevent split brain. An empty configuration forms a single-node cluster.
 * <p>
 * Only {@link Member.Type#ACTIVE} members may appear in a bootstrap configuration, so the local server
 * must be initialized as an active member to participate in bootstrap.
 * <p>
 * Once bootstrapped, the local server transitions into the active state and begins participating in the
 * Raft consensus algorithm. No leader exists at first; the bootstrapped members elect one amongst
 * themselves, retrying until a quorum is reached. Additional members may later be
 * {@link #join(Address...) joined} to the cluster.
 * <p>
 * It is critical that every server in a bootstrap configuration be started with the same exact set of
 * members; bootstrapping servers with differing configurations may result in split brain.
 * <p>
 * The returned {@link CompletableFuture} completes once the cluster has been bootstrapped, a leader has
 * been elected, and the leader has been notified of the local server's client configurations.
 *
 * @param cluster The bootstrap cluster configuration.
 * @return A completable future to be completed once the cluster has been bootstrapped.
 */
public CompletableFuture<CopycatServer> bootstrap(Collection<Address> cluster) {
  return start(() -> {
    return cluster().bootstrap(cluster);
  });
}
/**
 * Closes the balancing server.
 * <p>
 * Shutdown sequence: acquire the cluster-wide lock, hand this member's role
 * to a replacement, close the balancer, release the lock, close the parent,
 * and finally close the wrapped server. Only the final server close result
 * determines whether the returned future completes normally or exceptionally.
 *
 * @return a future to be completed once the server has been closed
 */
@Override public CompletableFuture<Void> close() {
  CompletableFuture<Void> future = new CompletableFuture<>();
  lock.lock()
      .thenCompose(v -> balancer.replace(server.server().cluster()))
      // whenComplete (not thenCompose): shutdown proceeds even if lock/replace failed (e1 is ignored).
      .whenComplete((r1, e1) -> {
        balancer.close();
        lock.unlock().whenComplete((r2, e2) -> {
          super.close().whenComplete((r3, e3) -> {
            // NOTE(review): errors from unlock (e2) and super.close() (e3) are swallowed here —
            // presumably deliberate best-effort teardown; confirm this is intended.
            server.close().whenComplete((r4, e4) -> {
              if (e4 == null) {
                future.complete(null);
              } else {
                future.completeExceptionally(e4);
              }
            });
          });
        });
      });
  return future;
}
closeFuture = new CompletableFuture<>(); if (openFuture == null) { cluster().leave().whenComplete((leaveResult, leaveError) -> { shutdown().whenComplete((shutdownResult, shutdownError) -> { context.delete(); openFuture.whenComplete((openResult, openError) -> { if (openError == null) { cluster().leave().whenComplete((leaveResult, leaveError) -> { shutdown().whenComplete((shutdownResult, shutdownError) -> { context.delete();
CompletableFuture<Void> leaveFuture; if (remove) { System.out.println("Removing server: " + server.cluster().member().address()); leaveFuture = server.leave(); } else { System.out.println("Shutting down server: " + server.cluster().member().address()); leaveFuture = server.shutdown(); restartTimers.put(serverIndex, context.schedule(Duration.ofSeconds(randomNumber(120) + 10), () -> { restartTimers.remove(serverIndex); CopycatServer newServer = createServer(server.cluster().member()); servers.set(serverIndex, newServer); CompletableFuture<CopycatServer> joinFuture; if (remove) { System.out.println("Adding server: " + newServer.cluster().member().address()); joinFuture = newServer.join(members.get(members.size() - 1).address()); } else { System.out.println("Bootstrapping server: " + newServer.cluster().member().address()); joinFuture = newServer.bootstrap(members.stream().map(Member::serverAddress).collect(Collectors.toList()));
/**
 * Bootstraps the cluster using the provided cluster configuration.
 * <p>
 * Bootstrapping forms a new cluster from the given configuration; the initial nodes of a cluster must
 * always be bootstrapped to prevent split brain. An empty configuration forms a single-node cluster.
 * <p>
 * Only {@link Member.Type#ACTIVE} members may appear in a bootstrap configuration, so the local server
 * must be initialized as an active member to participate in bootstrap.
 * <p>
 * Once bootstrapped, the local server transitions into the active state and begins participating in the
 * Raft consensus algorithm. No leader exists at first; the bootstrapped members elect one amongst
 * themselves, retrying until a quorum is reached. Additional members may later be
 * {@link #join(Address...) joined} to the cluster.
 * <p>
 * It is critical that every server in a bootstrap configuration be started with the same exact set of
 * members; bootstrapping servers with differing configurations may result in split brain.
 * <p>
 * The returned {@link CompletableFuture} completes once the cluster has been bootstrapped, a leader has
 * been elected, and the leader has been notified of the local server's client configurations.
 *
 * @param cluster The bootstrap cluster configuration.
 * @return A completable future to be completed once the cluster has been bootstrapped.
 */
public CompletableFuture<CopycatServer> bootstrap(Collection<Address> cluster) {
  return start(() -> {
    return cluster().bootstrap(cluster);
  });
}
/** * Starts listening the server. */ private CompletableFuture<Void> listen() { CompletableFuture<Void> future = new CompletableFuture<>(); context.getThreadContext().executor().execute(() -> { internalServer.listen(cluster().member().serverAddress(), context::connectServer).whenComplete((internalResult, internalError) -> { if (internalError == null) { // If the client address is different than the server address, start a separate client server. if (clientServer != null) { clientServer.listen(cluster().member().clientAddress(), context::connectClient).whenComplete((clientResult, clientError) -> { started = true; future.complete(null); }); } else { started = true; future.complete(null); } } else { future.completeExceptionally(internalError); } }); }); return future; }
return start(() -> cluster().join(cluster));
return start(() -> cluster().join(cluster));
/** * Starts listening the server. */ private CompletableFuture<Void> listen() { CompletableFuture<Void> future = new CompletableFuture<>(); context.getThreadContext().executor().execute(() -> { internalServer.listen(cluster().member().serverAddress(), c -> context.connectServer(c)).whenComplete((internalResult, internalError) -> { if (internalError == null) { // If the client address is different than the server address, start a separate client server. if (clientServer != null) { clientServer.listen(cluster().member().clientAddress(), c -> context.connectClient(c)).whenComplete((clientResult, clientError) -> { open = true; future.complete(null); }); } else { open = true; future.complete(null); } } else { future.completeExceptionally(internalError); } }); }); return future; }
/**
 * Wires membership change listeners onto the cluster so that any type,
 * status, leadership, join, or leave change triggers a rebalance.
 */
private void registerListeners() {
  // Watch every current member for role or availability changes.
  server.server().cluster().members().forEach(member -> {
    member.onTypeChange(type -> balance());
    member.onStatusChange(status -> balance());
  });
  // A leadership change may make this node responsible for balancing.
  server.server().cluster().onLeaderElection(leader -> balance());
  // New members get the same per-member listeners, and joining itself
  // may change the optimal layout.
  server.server().cluster().onJoin(joined -> {
    joined.onTypeChange(type -> balance());
    joined.onStatusChange(status -> balance());
    balance();
  });
  server.server().cluster().onLeave(left -> balance());
}
/**
 * Tests demoting the leader to a passive member and observing the type
 * change from a follower.
 */
public void testDemoteLeader() throws Throwable {
  List<CopycatServer> servers = createServers(3);

  // Locate the current leader and any one follower.
  CopycatServer leader = servers.stream()
      .filter(server -> server.cluster().member().equals(server.cluster().leader()))
      .findFirst()
      .get();

  CopycatServer follower = servers.stream()
      .filter(server -> !server.cluster().member().equals(server.cluster().leader()))
      .findFirst()
      .get();

  // The follower should observe the leader's member transition to PASSIVE.
  follower.cluster().member(leader.cluster().member().address()).onTypeChange(type -> {
    threadAssertEquals(type, Member.Type.PASSIVE);
    resume();
  });

  leader.cluster().member().demote(Member.Type.PASSIVE).thenRun(() -> resume());

  await(10000, 2);
}
/**
 * Tests that a reserve member detects when a passive member becomes
 * unavailable.
 */
public void testReservePassiveAvailabilityChange() throws Throwable {
  createServers(3);

  CopycatServer passive = createServer(nextMember(Member.Type.PASSIVE));
  passive.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  CopycatServer reserve = createServer(nextMember(Member.Type.RESERVE));
  reserve.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  // Wait for both joins to complete.
  await(10000, 2);

  // The reserve member should see the passive member go UNAVAILABLE.
  reserve.cluster().member(passive.cluster().member().address()).onStatusChange(status -> {
    threadAssertEquals(status, Member.Status.UNAVAILABLE);
    resume();
  });

  passive.shutdown().thenRun(() -> resume());

  await(10000, 2);
}
/**
 * Tests that a join event fires with the joining member's address and type.
 */
private void testJoinEvent(Member.Type type) throws Throwable {
  List<CopycatServer> servers = createServers(3);

  Member member = nextMember(type);

  // An existing server should observe the new member joining.
  CopycatServer observer = servers.get(0);
  observer.cluster().onJoin(joined -> {
    threadAssertEquals(joined.address(), member.address());
    threadAssertEquals(joined.type(), type);
    resume();
  });

  CopycatServer joiner = createServer(member);
  joiner.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  await(10000, 2);
}
/**
 * Tests that a member's status transitions to UNAVAILABLE after it is
 * shut down.
 */
private void testAvailabilityChange(Member.Type type) throws Throwable {
  List<CopycatServer> servers = createServers(3);

  // On every join, watch the joined member for availability changes.
  CopycatServer observer = servers.get(0);
  observer.cluster().onJoin(joined ->
      joined.onStatusChange(status -> {
        threadAssertEquals(status, Member.Status.UNAVAILABLE);
        resume();
      }));

  Member member = nextMember(type);
  CopycatServer joiner = createServer(member);
  joiner.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  await(10000);

  joiner.shutdown().thenRun(() -> resume());

  await(10000, 2);
}
/**
 * Tests that a passive member detects when a reserve member becomes
 * unavailable.
 */
public void testPassiveReserveAvailabilityChange() throws Throwable {
  createServers(3);

  CopycatServer passive = createServer(nextMember(Member.Type.PASSIVE));
  passive.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  await(10000);

  Member reserveMember = nextMember(Member.Type.RESERVE);

  // The passive member should see the reserve member join and then go UNAVAILABLE.
  passive.cluster().onJoin(joined -> {
    threadAssertEquals(joined.address(), reserveMember.address());
    joined.onStatusChange(status -> {
      threadAssertEquals(status, Member.Status.UNAVAILABLE);
      resume();
    });
  });

  CopycatServer reserve = createServer(reserveMember);
  reserve.join(members.stream().map(m -> m.serverAddress()).collect(Collectors.toList())).thenRun(() -> resume());

  await(10000);

  reserve.shutdown().thenRun(() -> resume());

  await(10000, 2);
}
/**
 * Tests bootstrapping a single-member cluster and then joining two more
 * members one at a time.
 */
public void testSingleMemberStart() throws Throwable {
  // Bootstrap a cluster of one.
  CopycatServer first = createServers(1).get(0);
  first.bootstrap().thenRun(() -> resume());

  await(5000);

  // Join a second active member.
  CopycatServer second = createServer(nextMember(Member.Type.ACTIVE));
  second.join(first.cluster().member().address()).thenRun(() -> resume());

  await(5000);

  // Join a third active member.
  CopycatServer third = createServer(nextMember(Member.Type.ACTIVE));
  third.join(first.cluster().member().address()).thenRun(() -> resume());

  await(5000);
}