/**
 * Renders this host as {@code ip:port}, with {@code :endpointId} appended
 * only when an endpoint id is present.
 */
@Override
public String toString() {
    StringBuilder rendered = new StringBuilder();
    rendered.append(this.getIpAddr()).append(':').append(this.getPort());
    if (this.getEndpointId() != null) {
        rendered.append(':').append(this.getEndpointId());
    }
    return rendered.toString();
}
/**
 * Returns a host identifier combining the IP address and the endpoint id,
 * joined with a single '-'.
 */
public String getHostId() {
    return String.format("%s-%s", this.getIpAddr(), this.getEndpointId());
}
}
/**
 * Factory method to construct Store exceptions.
 *
 * @param type    Type of Exception.
 * @param message Exception message.
 * @return Instance of ClusterException.
 * @throws IllegalArgumentException if {@code type} is not a recognized type.
 */
public static ClusterException create(Type type, String message) {
    switch (type) {
        case METASTORE:
            return new MetaStoreException(message);
        default:
            // Include the offending value so callers can see what was passed.
            throw new IllegalArgumentException("Invalid exception type: " + type);
    }
}
// Register one host, then wait (up to 10s) for the sync semaphore — presumably
// released by the host-change listener once the store is updated; TODO confirm.
cluster.registerHost(new Host("localhost1", 1, null));
assertTrue(sync.tryAcquire(10, TimeUnit.SECONDS));
assertEquals(1, hostStore.getHostContainersMap().size());
// Add three more hosts and remove the first: 4 registered - 1 deregistered,
// so the host-container map should settle at 3 entries.
cluster.registerHost(new Host("localhost2", 2, null));
cluster.registerHost(new Host("localhost3", 3, null));
cluster.registerHost(new Host("localhost4", 4, null));
cluster.deregisterHost(new Host("localhost1", 1, null));
assertTrue(sync.tryAcquire(10, TimeUnit.SECONDS));
assertEquals(3, hostStore.getHostContainersMap().size());
// Re-register the removed host; the continuation of this scenario is outside
// this view of the file.
cluster.registerHost(new Host("localhost1", 1, null));
public CompletableFuture<List<NodeUri>> getControllerServerList() { if (cluster == null) { return Futures.failedFuture(new IllegalStateException("Controller cluster not initialized")); } return CompletableFuture.supplyAsync(() -> { try { return cluster.getClusterMembers().stream() .map(host -> NodeUri.newBuilder().setEndpoint(host.getIpAddr()).setPort(host.getPort()).build()) .collect(Collectors.toList()); } catch (ClusterException e) { // cluster implementation throws checked exceptions which cannot be thrown inside completable futures. throw Exceptions.sneakyThrow(e); } }, executor); }
/**
 * Resolves the segment-store host owning the given segment and exposes it as
 * a {@code NodeUri}.
 */
public Controller.NodeUri getSegmentUri(final String scope, final String stream, final long segmentId,
                                        final HostControllerStore hostStore) {
    final Host owner = hostStore.getHostForSegment(scope, stream, segmentId);
    return Controller.NodeUri.newBuilder()
            .setEndpoint(owner.getIpAddr())
            .setPort(owner.getPort())
            .build();
}
/**
 * Test stub: every segment, regardless of scope/stream/id, maps to the same
 * fixed host at localhost:1000.
 */
@Override
public Host getHostForSegment(String scope, String stream, long segmentId) {
    final Host fixedHost = new Host("localhost", 1000, "");
    return fixedHost;
}
}
/**
 * Initializes this manager: starts the container monitor, then registers this
 * host with the cluster. If any step fails, the instance is closed before the
 * exception is rethrown, so no partially-initialized state is left behind.
 */
@Override
public void initialize() {
    Exceptions.checkNotClosed(closed.get(), this);
    long traceId = LoggerHelpers.traceEnter(log, "initialize");
    try {
        // Initialize the container monitor.
        this.containerMonitor.initialize();

        // Advertise this segment store to the cluster.
        this.cluster.registerHost(this.host);
        log.info("Initialized.");
        LoggerHelpers.traceLeave(log, "initialize", traceId);
    } catch (Exception ex) {
        // Need to make sure we clean up resources if we failed to initialize.
        log.error("Initialization error. Cleaning up.", ex);
        close();
        throw ex;
    }
}
/**
 * Service shutdown hook: removes this controller's host entry from the
 * cluster so peers stop treating it as a live member. The trace-leave call is
 * in a finally block so the trace stays balanced even if deregistration fails.
 */
@Override
protected void shutDown() throws Exception {
    long traceId = LoggerHelpers.traceEnter(log, objectId, "shutDown");
    try {
        log.info("Deregistering host {} from controller cluster", host);
        cluster.deregisterHost(host);
        log.info("Controller cluster listener shutDown complete");
    } finally {
        LoggerHelpers.traceLeave(log, objectId, "shutDown", traceId);
    }
}
}
private void triggerRebalance() throws IOException { //Read the current mapping from the host store and write back the update after rebalancing. try { Map<Host, Set<Integer>> newMapping = segBalancer.rebalance(hostStore.getHostContainersMap(), pravegaServiceCluster.getClusterMembers()); Map<Host, Set<Integer>> oldMapping = hostStore.getHostContainersMap(); hostStore.updateHostContainersMap(newMapping); hostContainerMetrics.updateHostContainerMetrics(oldMapping, newMapping); } catch (ClusterException e) { throw new IOException(e); } }
/**
 * Publishes a per-host gauge with the number of containers currently assigned
 * to the given host.
 */
private void reportContainerCountPerHost(Host host, Set<Integer> containerIds) {
    final String metricName = nameFromHost(SEGMENT_STORE_HOST_CONTAINER_COUNT, host.toString());
    DYNAMIC_LOGGER.reportGaugeValue(metricName, containerIds.size());
}
/**
 * Test teardown: closes the cluster, the ZooKeeper client, and the test
 * server. Each close is chained through a finally block so a failure in an
 * earlier close() cannot leak the remaining resources (previously an
 * exception from cluster.close() skipped both ZooKeeper closes).
 */
@After
public void stopZookeeper() throws Exception {
    try {
        cluster.close();
    } finally {
        try {
            zkClient.close();
        } finally {
            zkTestServer.close();
        }
    }
}
/**
 * Creates and starts the ZooKeeper path-children cache, pre-loading the
 * initial set of children.
 *
 * @throws ClusterException if the cache fails to start.
 */
private void initializeCache() throws ClusterException {
    cache = Optional.of(new PathChildrenCache(client, getPathPrefix(), true));
    try {
        cache.get().start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
    } catch (Exception e) {
        // Attach the original exception as the cause instead of flattening it
        // to a message string, so the ZooKeeper failure's stack trace survives.
        // NOTE(review): assumes create(...) does not pre-set a cause — confirm.
        ClusterException wrapped = ClusterException.create(ClusterException.Type.METASTORE,
                "Failed to initialize ZooKeeper cache: " + e.getMessage());
        wrapped.initCause(e);
        throw wrapped;
    }
}
private PathChildrenCacheListener pathChildrenCacheListener(final ClusterListener listener) { return (client, event) -> { log.debug("Event {} generated on cluster", event); switch (event.getType()) { case CHILD_ADDED: log.info("Node {} added to cluster", getServerName(event)); listener.onEvent(HOST_ADDED, (Host) SerializationUtils.deserialize(event.getData().getData())); break; case CHILD_REMOVED: log.info("Node {} removed from cluster", getServerName(event)); listener.onEvent(HOST_REMOVED, (Host) SerializationUtils.deserialize(event.getData().getData())); break; case CHILD_UPDATED: log.warn("Invalid usage: Node {} updated externally for cluster", getServerName(event)); break; case CONNECTION_LOST: log.error("Connection lost with Zookeeper"); listener.onEvent(ERROR, null); break; //$CASES-OMITTED$ default: log.warn("Received unknown event {}", event.getType()); } }; }
private CompletableFuture<Void> handleHostRemoved(Host host) { return Futures.allOf(sweepers.stream().map(sweeper -> { if (sweeper.isReady()) { // Note: if we find sweeper to be ready, it is possible that this processes can be swept by both // sweepFailedProcesses and handleFailedProcess. A sweep is safe and idempotent operation. return RetryHelper.withIndefiniteRetriesAsync(() -> sweeper.handleFailedProcess(host.getHostId()), e -> log.warn(e.getMessage()), executor); } else { return CompletableFuture.completedFuture((Void) null); } }).collect(Collectors.toList())); }
private void validateStore(HostControllerStore hostStore) { // Validate store values. Assert.assertEquals(containerCount, hostStore.getContainerCount()); Host hostObj = hostStore.getHostForSegment("dummyScope", "dummyStream", (int) Math.floor(containerCount * Math.random())); Assert.assertEquals(controllerPort, hostObj.getPort()); Assert.assertEquals(host, hostObj.getIpAddr()); } }
/**
 * Builds a single-host container map in which the given host owns every
 * container id in {@code [0, containerCount)}.
 *
 * @param host           the host's IP address; must be non-empty.
 * @param port           the host's port; must be positive.
 * @param containerCount number of containers; must be positive.
 * @return a mutable map with exactly one entry.
 */
public static Map<Host, Set<Integer>> getHostContainerMap(String host, int port, int containerCount) {
    Exceptions.checkNotNullOrEmpty(host, "host");
    Preconditions.checkArgument(port > 0, "port");
    Preconditions.checkArgument(containerCount > 0, "containerCount");

    Set<Integer> allContainers = IntStream.range(0, containerCount)
            .boxed()
            .collect(Collectors.toSet());
    Map<Host, Set<Integer>> mapping = new HashMap<>();
    mapping.put(new Host(host, port, null), allContainers);
    return mapping;
}
}
private void reportHostFailures(Host failedHost) { DYNAMIC_LOGGER.incCounterValue(globalMetricName(SEGMENT_STORE_HOST_FAILURES), 1); DYNAMIC_LOGGER.incCounterValue(nameFromHost(SEGMENT_STORE_HOST_FAILURES, failedHost.toString()), 1); // Set to 0 the number of containers for the failed host. DYNAMIC_LOGGER.reportGaugeValue(nameFromHost(SEGMENT_STORE_HOST_CONTAINER_COUNT, failedHost.toString()), 0); }
/**
 * Test teardown: shuts down the executor, then closes every resource. Each
 * close() is chained through a finally block so a failure in one close cannot
 * skip the rest (previously an exception from any early close leaked the
 * store client, the ZooKeeper client, and the ZooKeeper server).
 */
@Override
public void tearDown() throws Exception {
    if (executorService != null) {
        ExecutorServiceHelpers.shutdown(executorService);
    }
    try {
        if (streamMetadataTasks != null) {
            streamMetadataTasks.close();
        }
    } finally {
        try {
            if (streamTransactionMetadataTasks != null) {
                streamTransactionMetadataTasks.close();
            }
        } finally {
            try {
                if (cluster != null) {
                    cluster.close();
                }
            } finally {
                try {
                    storeClient.close();
                } finally {
                    try {
                        zkClient.close();
                    } finally {
                        zkServer.close();
                    }
                }
            }
        }
    }
}
/**
 * Registers a container-manager factory that publishes this segment store in
 * ZooKeeper under its externally advertised address and port.
 */
private void attachZKSegmentManager(ServiceBuilder builder) {
    builder.withContainerManager(setup -> {
        // Built inside the factory so the advertised address is read when the
        // manager is created, exactly as before.
        Host advertisedHost = new Host(
                this.serviceConfig.getPublishedIPAddress(),
                this.serviceConfig.getPublishedPort(),
                null);
        return new ZKSegmentContainerManager(
                setup.getContainerRegistry(),
                this.zkClient,
                advertisedHost,
                setup.getCoreExecutor());
    });
}