/**
 * Applies {@code function} to the admin end points of every remote data center;
 * the local ("self") data center is excluded from the collection.
 *
 * @param function callback invoked once with the collection of remote end points
 */
@Override
public void withEndPoints(Function<Collection<EndPoint>, ?> function) {
    List<EndPoint> remoteEndPoints = Lists.newArrayList();
    DataCenter localDataCenter = _dataCenters.getSelf();
    for (DataCenter candidate : _dataCenters.getAll()) {
        if (candidate.equals(localDataCenter)) {
            continue;  // skip ourselves: only remote data centers are end points
        }
        final URI adminUri = candidate.getAdminUri();
        remoteEndPoints.add(new EndPoint() {
            @Override
            public String getAddress() {
                return _endPointAdapter.toEndPointAddress(adminUri);
            }

            @Override
            public boolean isValid() {
                // No liveness information is available here, so end points are
                // always reported as valid.
                return true;
            }
        });
    }
    function.apply(remoteEndPoints);
}
}
/**
 * Hands {@code function} the admin end points of all data centers other than
 * the local one.
 *
 * @param function callback applied to the collected remote end points
 */
@Override
public void withEndPoints(Function<Collection<EndPoint>, ?> function) {
    final DataCenter self = _dataCenters.getSelf();
    final List<EndPoint> endPoints = Lists.newArrayList();
    for (DataCenter remote : _dataCenters.getAll()) {
        if (remote.equals(self)) {
            continue;  // the local data center is not an outbound end point
        }
        final URI adminUri = remote.getAdminUri();
        endPoints.add(new EndPoint() {
            @Override
            public String getAddress() {
                return _endPointAdapter.toEndPointAddress(adminUri);
            }

            @Override
            public boolean isValid() {
                // Always valid: no health signal is available in this context.
                return true;
            }
        });
    }
    function.apply(endPoints);
}
}
/**
 * Moves ("drains") all events from one replication fanout partition to another for the
 * named remote data center.  The source partition must lie outside the active range
 * {@code [0, _dataCenterFanoutPartitions)} and the destination must lie inside it, so
 * events stranded in a no-longer-used partition are folded back into an active one.
 * Progress and validation errors are reported through {@code printWriter}.
 *
 * @param dataCenter  name of the remote data center whose fanout channels are drained
 * @param from        source partition index (must not be currently in use)
 * @param to          destination partition index (must be currently in use)
 * @param printWriter sink for human-readable status messages
 */
private void drainDataCenterPartition(String dataCenter, int from, int to, PrintWriter printWriter) {
    // Hoist the loop-invariant "self" lookup out of the stream filter; the original
    // called _dataCenters.getSelf() once per data center.
    DataCenter self = _dataCenters.getSelf();
    Map<String, DataCenter> availableDataCenters = _dataCenters.getAll().stream()
            .filter(dc -> !self.equals(dc))
            .collect(Collectors.toMap(DataCenter::getName, dc -> dc));
    DataCenter outboundDataCenter = availableDataCenters.get(dataCenter);
    if (outboundDataCenter == null) {
        printWriter.write("Invalid data center, must be one of " + Joiner.on(",").join(availableDataCenters.keySet()));
        return;
    }
    if (from < _dataCenterFanoutPartitions) {
        // Active partitions still receive new events; draining one would race the writers.
        printWriter.write("Cannot drain partition currently in use");
        return;
    }
    if (to >= _dataCenterFanoutPartitions) {
        // Draining into an inactive partition would just strand the events again.
        printWriter.write("Cannot drain to partition not in use");
        return;
    }
    printWriter.write(String.format("Draining %s partition %d to partition %d...\n", dataCenter, from, to));
    _eventStore.move(ChannelNames.getReplicationFanoutChannel(outboundDataCenter, from),
            ChannelNames.getReplicationFanoutChannel(outboundDataCenter, to));
    printWriter.write("Done!\n");
}
/**
 * Drains all events from an out-of-use replication fanout partition of the named
 * remote data center into a partition that is currently in use.  Validates that
 * {@code from} is outside the active range {@code [0, _dataCenterFanoutPartitions)}
 * and {@code to} is inside it; all output goes to {@code printWriter}.
 *
 * @param dataCenter  name of the remote data center whose fanout channels are drained
 * @param from        source partition index (must not be currently in use)
 * @param to          destination partition index (must be currently in use)
 * @param printWriter sink for human-readable status messages
 */
private void drainDataCenterPartition(String dataCenter, int from, int to, PrintWriter printWriter) {
    // Hoist the loop-invariant "self" lookup out of the stream filter; the original
    // called _dataCenters.getSelf() once per data center.
    DataCenter self = _dataCenters.getSelf();
    Map<String, DataCenter> availableDataCenters = _dataCenters.getAll().stream()
            .filter(dc -> !self.equals(dc))
            .collect(Collectors.toMap(DataCenter::getName, dc -> dc));
    DataCenter outboundDataCenter = availableDataCenters.get(dataCenter);
    if (outboundDataCenter == null) {
        printWriter.write("Invalid data center, must be one of " + Joiner.on(",").join(availableDataCenters.keySet()));
        return;
    }
    if (from < _dataCenterFanoutPartitions) {
        // Active partitions still receive new events; draining one would race the writers.
        printWriter.write("Cannot drain partition currently in use");
        return;
    }
    if (to >= _dataCenterFanoutPartitions) {
        // Draining into an inactive partition would just strand the events again.
        printWriter.write("Cannot drain to partition not in use");
        return;
    }
    printWriter.write(String.format("Draining %s partition %d to partition %d...\n", dataCenter, from, to));
    _eventStore.move(ChannelNames.getReplicationFanoutChannel(outboundDataCenter, from),
            ChannelNames.getReplicationFanoutChannel(outboundDataCenter, to));
    printWriter.write("Done!\n");
}
@Override protected void runOneIteration() throws Exception { try { // Start replication for all new data centers. Map<String, Managed> active = Maps.newHashMap(_dataCenterFanout); DataCenter self = _dataCenters.getSelf(); for (DataCenter dataCenter : _dataCenters.getAll()) { if (dataCenter.equals(self)) { continue; } Managed fanout = active.remove(dataCenter.getName()); if (fanout == null) { fanout = newInboundReplication(dataCenter); try { fanout.start(); } catch (Exception e) { _log.error("Unexpected exception starting replication service: {}", dataCenter.getName()); continue; } _dataCenterFanout.put(dataCenter.getName(), fanout); } } // If a DataCenter has been removed, stop replicating from it. stopAll(active); } catch (Throwable t) { _log.error("Unexpected exception polling data center changes.", t); } }
@Override protected void runOneIteration() throws Exception { try { // Start replication for all new data centers. Map<String, Managed> active = Maps.newHashMap(_dataCenterFanout); DataCenter self = _dataCenters.getSelf(); for (DataCenter dataCenter : _dataCenters.getAll()) { if (dataCenter.equals(self)) { continue; } Managed fanout = active.remove(dataCenter.getName()); if (fanout == null) { fanout = newInboundReplication(dataCenter); try { fanout.start(); } catch (Exception e) { _log.error("Unexpected exception starting replication service: {}", dataCenter.getName()); continue; } _dataCenterFanout.put(dataCenter.getName(), fanout); } } // If a DataCenter has been removed, stop replicating from it. stopAll(active); } catch (Throwable t) { _log.error("Unexpected exception polling data center changes.", t); } }
/**
 * Provides the compaction-control sources for every data center: the injected local
 * source for the local data center, and a remote service-pool proxy (built from each
 * remote data center's service/admin URIs) for all others.
 *
 * @return one {@link CompactionControlSource} per known data center
 */
@Provides
@Singleton
@AllCompactionControlSources
public List<CompactionControlSource> getAllCompactionControlSources(@LocalCompactionControl CompactionControlSource localCompactionControlSource,
                                                                    @ServerCluster String serverCluster, Client client, DataCenters dataCenters,
                                                                    @CompControlApiKey String compControlApiKey, HealthCheckRegistry healthCheckRegistry,
                                                                    MetricRegistry metrics) {
    List<CompactionControlSource> compactionControlSources = Lists.newArrayList();
    // FIX: the factory and the "self" lookup are loop-invariant; the original rebuilt
    // the factory on every iteration (even for the local branch, where it was unused).
    MultiThreadedServiceFactory<CompactionControlSource> clientFactory =
            new CompactionControlClientFactory(serverCluster, new JerseyEmoClient(client), compControlApiKey);
    DataCenter self = dataCenters.getSelf();
    for (DataCenter dataCenter : dataCenters.getAll()) {
        if (dataCenter.equals(self)) {
            // The local data center talks to the in-process source directly.
            compactionControlSources.add(localCompactionControlSource);
        } else {
            // Remote data centers get a fixed-host service-pool proxy with
            // exponential-backoff retries.
            ServiceEndPoint endPoint = new ServiceEndPointBuilder()
                    .withServiceName(clientFactory.getServiceName())
                    .withId(dataCenter.getName())
                    .withPayload(new PayloadBuilder()
                            .withUrl(dataCenter.getServiceUri().resolve(DataStoreClient.SERVICE_PATH))
                            .withAdminUrl(dataCenter.getAdminUri())
                            .toString())
                    .build();
            compactionControlSources.add(ServicePoolBuilder.create(CompactionControlSource.class)
                    .withHostDiscovery(new FixedHostDiscovery(endPoint))
                    .withServiceFactory(clientFactory)
                    .withCachingPolicy(ServiceCachingPolicyBuilder.getMultiThreadedClientPolicy())
                    .withMetricRegistry(metrics)
                    .buildProxy(new ExponentialBackoffRetry(30, 1, 10, TimeUnit.SECONDS)));
        }
    }
    return compactionControlSources;
}
/**
 * Samples queue sizes for all fanout channels and publishes them as gauges:
 * the summed master fanout partitions, each cluster's canary subscription, and
 * the summed outbound replication partitions for every remote data center.
 */
private void pollQueueSizes() {
    _gauges.beginUpdates();

    // Master fanout: sum the per-partition sizes into a single gauge.
    long masterTotal = 0;
    for (int p = 0; p < _masterFanoutPartitions; p++) {
        masterTotal += pollQueueSize("master-" + p, ChannelNames.getMasterFanoutChannel(p));
    }
    _gauges.gauge(newMetric("master")).set(masterTotal);

    // One canary gauge per cluster.
    for (ClusterInfo cluster : _clusterInfo) {
        pollQueueSize("canary-" + cluster.getClusterMetric(),
                ChannelNames.getMasterCanarySubscription(cluster.getCluster()));
    }

    // Outbound replication: one summed gauge per remote data center.
    DataCenter localDataCenter = _dataCenters.getSelf();
    for (DataCenter remote : _dataCenters.getAll()) {
        if (remote.equals(localDataCenter)) {
            continue;  // no outbound channels to ourselves
        }
        long outboundTotal = 0;
        for (int p = 0; p < _dataCenterFanoutPartitions; p++) {
            outboundTotal += pollQueueSize("out-" + remote.getName() + "-" + p,
                    ChannelNames.getReplicationFanoutChannel(remote, p));
        }
        _gauges.gauge(newMetric("out-" + remote.getName())).set(outboundTotal);
    }

    _gauges.endUpdates();
}
/**
 * Polls and publishes queue-size gauges: total master fanout size across its
 * partitions, per-cluster canary subscription sizes, and per-remote-data-center
 * totals across the replication fanout partitions.
 */
private void pollQueueSizes() {
    _gauges.beginUpdates();

    // Aggregate all master fanout partitions into one "master" gauge.
    long masterSum = 0;
    for (int partition = 0; partition < _masterFanoutPartitions; partition++) {
        masterSum += pollQueueSize("master-" + partition, ChannelNames.getMasterFanoutChannel(partition));
    }
    _gauges.gauge(newMetric("master")).set(masterSum);

    // Canary subscriptions are polled individually, one per cluster.
    for (ClusterInfo info : _clusterInfo) {
        pollQueueSize("canary-" + info.getClusterMetric(),
                ChannelNames.getMasterCanarySubscription(info.getCluster()));
    }

    // Aggregate each remote data center's replication partitions into one gauge.
    DataCenter self = _dataCenters.getSelf();
    for (DataCenter dc : _dataCenters.getAll()) {
        if (dc.equals(self)) {
            continue;  // the local data center has no outbound replication channels
        }
        long dcSum = 0;
        for (int partition = 0; partition < _dataCenterFanoutPartitions; partition++) {
            dcSum += pollQueueSize("out-" + dc.getName() + "-" + partition,
                    ChannelNames.getReplicationFanoutChannel(dc, partition));
        }
        _gauges.gauge(newMetric("out-" + dc.getName())).set(dcSum);
    }

    _gauges.endUpdates();
}