/**
 * Invokes {@code function} with the admin end points of every remote data center
 * (every known data center except this one).  Each end point reports itself as
 * always valid.
 */
@Override
public void withEndPoints(Function<Collection<EndPoint>, ?> function) {
    DataCenter localDataCenter = _dataCenters.getSelf();
    List<EndPoint> remoteEndPoints = Lists.newArrayList();
    for (DataCenter candidate : _dataCenters.getAll()) {
        if (candidate.equals(localDataCenter)) {
            continue;  // skip ourselves; only remote admin end points are reported
        }
        final URI remoteAdminUri = candidate.getAdminUri();
        remoteEndPoints.add(new EndPoint() {
            @Override
            public String getAddress() {
                return _endPointAdapter.toEndPointAddress(remoteAdminUri);
            }

            @Override
            public boolean isValid() {
                return true;
            }
        });
    }
    function.apply(remoteEndPoints);
}
}
/**
 * Returns the data centers that host the keyspace underlying the given placement.
 *
 * @param placement placement string whose first parsed component is the keyspace
 */
@Override
public Collection<DataCenter> getDataCenters(String placement) {
    // The keyspace is the first component of the parsed placement.
    String keyspaceName = PlacementUtil.parsePlacement(placement)[0];
    return _dataCenters.getForKeyspace(keyspaceName);
}
}
@Override public void start() throws Exception { Set<String> cassandraKeyspaces = Sets.newTreeSet(); for (KeyspaceDiscovery keyspaceDiscovery : _keyspaceDiscoveries) { cassandraKeyspaces.addAll(keyspaceDiscovery.getKeyspacesForDataCenter(_selfCassandraDataCenter)); } boolean system = _selfDataCenter.equals(_systemDataCenter); DataCenter self = new DefaultDataCenter(_selfDataCenter, _selfServiceUri, _selfAdminUri, system, _selfCassandraDataCenter, cassandraKeyspaces); DataCenter original; try { original = _dataCenters.getSelf(); } catch (Exception e) { original = null; // self hasn't been announced yet. } if (_dataCenterDao.saveIfChanged(self, original)) { _log.info("Announced new data center: {}", self); _dataCenters.refresh(); } }
/**
 * Supplies the jobs table name, qualified by the local data center's name with
 * any whitespace replaced by underscores.
 */
@Override
public String get() {
    String localName = dataCenters.getSelf().getName();
    return format(JOBS_TABLE_NAME_FORMAT, localName.replaceAll("\\s", "_"));
}
};
/**
 * Supplies the jobs table name, qualified by the local data center's name with
 * any whitespace replaced by underscores.
 */
@Override
public String get() {
    String localName = dataCenters.getSelf().getName();
    return format(JOBS_TABLE_NAME_FORMAT, localName.replaceAll("\\s", "_"));
}
};
/**
 * Invokes {@code function} with the admin end points of every remote data center
 * (every known data center except this one).  Each end point reports itself as
 * always valid.
 */
@Override
public void withEndPoints(Function<Collection<EndPoint>, ?> function) {
    DataCenter localDataCenter = _dataCenters.getSelf();
    List<EndPoint> remoteEndPoints = Lists.newArrayList();
    for (DataCenter candidate : _dataCenters.getAll()) {
        if (candidate.equals(localDataCenter)) {
            continue;  // skip ourselves; only remote admin end points are reported
        }
        final URI remoteAdminUri = candidate.getAdminUri();
        remoteEndPoints.add(new EndPoint() {
            @Override
            public String getAddress() {
                return _endPointAdapter.toEndPointAddress(remoteAdminUri);
            }

            @Override
            public boolean isValid() {
                return true;
            }
        });
    }
    function.apply(remoteEndPoints);
}
}
/**
 * Best-effort cleanup after a scan terminates: removes it from the active set,
 * drops its Stash token-range snapshot, and deletes its recorded start time from
 * ZooKeeper.  The steps are independent — a failure in one is logged and does
 * not prevent the later steps from running.
 */
private void cleanupScan(String id) {
    // Remove this scan from the active set
    if (_activeScans.remove(id)) {
        notifyActiveScanCountChanged();
    }
    try {
        // Remove the table snapshots set for this scan
        _dataTools.clearStashTokenRangeSnapshot(id);
    } catch (Exception e) {
        _log.error("Failed to clean up table set for scan {}", id, e);
    }
    try {
        // Delete the entry of the scan start time in Zookeeper.
        _compactionControlSource.deleteStashTime(id, _dataCenters.getSelf().getName());
    } catch (Exception e) {
        _log.error("Failed to delete the stash time for scan {}", id, e);
    }
}
@Override public void start() throws Exception { Set<String> cassandraKeyspaces = Sets.newTreeSet(); for (KeyspaceDiscovery keyspaceDiscovery : _keyspaceDiscoveries) { cassandraKeyspaces.addAll(keyspaceDiscovery.getKeyspacesForDataCenter(_selfCassandraDataCenter)); } boolean system = _selfDataCenter.equals(_systemDataCenter); DataCenter self = new DefaultDataCenter(_selfDataCenter, _selfServiceUri, _selfAdminUri, system, _selfCassandraDataCenter, cassandraKeyspaces); DataCenter original; try { original = _dataCenters.getSelf(); } catch (Exception e) { original = null; // self hasn't been announced yet. } if (_dataCenterDao.saveIfChanged(self, original)) { _log.info("Announced new data center: {}", self); _dataCenters.refresh(); } }
/**
 * Returns the data centers that host the keyspace underlying the given placement.
 *
 * @param placement placement string whose first parsed component is the keyspace
 */
@Override
public Collection<DataCenter> getDataCenters(String placement) {
    // The keyspace is the first component of the parsed placement.
    String keyspaceName = PlacementUtil.parsePlacement(placement)[0];
    return _dataCenters.getForKeyspace(keyspaceName);
}
/**
 * Moves all queued replication events for the named remote data center from fanout
 * partition {@code from} into partition {@code to}.  Only a partition that is no
 * longer in active use (>= _dataCenterFanoutPartitions) may be drained, and only
 * into a partition currently in use.
 *
 * @param dataCenter  name of a remote (non-self) data center
 * @param from        source partition; must NOT be currently in use
 * @param to          destination partition; must be currently in use
 * @param printWriter sink for progress and validation messages
 */
private void drainDataCenterPartition(String dataCenter, int from, int to, PrintWriter printWriter) {
    // Hoist the self lookup out of the stream: the original re-fetched
    // _dataCenters.getSelf() once per data center inside the filter.
    DataCenter self = _dataCenters.getSelf();
    Map<String, DataCenter> availableDataCenters = _dataCenters.getAll().stream()
            .filter(dc -> !self.equals(dc))
            .collect(Collectors.toMap(DataCenter::getName, dc -> dc));

    DataCenter outboundDataCenter = availableDataCenters.get(dataCenter);
    if (outboundDataCenter == null) {
        printWriter.write("Invalid data center, must be one of " + Joiner.on(",").join(availableDataCenters.keySet()));
        return;
    }
    if (from < _dataCenterFanoutPartitions) {
        printWriter.write("Cannot drain partition currently in use");
        return;
    }
    if (to >= _dataCenterFanoutPartitions) {
        printWriter.write("Cannot drain to partition not in use");
        return;
    }

    printWriter.write(String.format("Draining %s partition %d to partition %d...\n", dataCenter, from, to));
    _eventStore.move(ChannelNames.getReplicationFanoutChannel(outboundDataCenter, from),
            ChannelNames.getReplicationFanoutChannel(outboundDataCenter, to));
    printWriter.write("Done!\n");
}
public void cancel(String id) { _scanStatusDAO.setCanceled(id); // Notify the workflow the scan status was updated _scanWorkflow.scanStatusUpdated(id); try { // Delete the entry of the scan start time in Zookeeper. _compactionControlSource.deleteStashTime(id, _dataCenters.getSelf().getName()); } catch (Exception e) { _log.error("Failed to delete the stash time for scan {}", id, e); } } }
/**
 * Returns the data centers that host the keyspace underlying the given placement.
 *
 * @param placement placement string whose first parsed component is the keyspace
 */
@Override
public Collection<DataCenter> getDataCenters(String placement) {
    // The keyspace is the first component of the parsed placement.
    String keyspaceName = PlacementUtil.parsePlacement(placement)[0];
    return _dataCenters.getForKeyspace(keyspaceName);
}
/**
 * Moves all queued replication events for the named remote data center from fanout
 * partition {@code from} into partition {@code to}.  Only a partition that is no
 * longer in active use (>= _dataCenterFanoutPartitions) may be drained, and only
 * into a partition currently in use.
 *
 * @param dataCenter  name of a remote (non-self) data center
 * @param from        source partition; must NOT be currently in use
 * @param to          destination partition; must be currently in use
 * @param printWriter sink for progress and validation messages
 */
private void drainDataCenterPartition(String dataCenter, int from, int to, PrintWriter printWriter) {
    // Hoist the self lookup out of the stream: the original re-fetched
    // _dataCenters.getSelf() once per data center inside the filter.
    DataCenter self = _dataCenters.getSelf();
    Map<String, DataCenter> availableDataCenters = _dataCenters.getAll().stream()
            .filter(dc -> !self.equals(dc))
            .collect(Collectors.toMap(DataCenter::getName, dc -> dc));

    DataCenter outboundDataCenter = availableDataCenters.get(dataCenter);
    if (outboundDataCenter == null) {
        printWriter.write("Invalid data center, must be one of " + Joiner.on(",").join(availableDataCenters.keySet()));
        return;
    }
    if (from < _dataCenterFanoutPartitions) {
        printWriter.write("Cannot drain partition currently in use");
        return;
    }
    if (to >= _dataCenterFanoutPartitions) {
        printWriter.write("Cannot drain to partition not in use");
        return;
    }

    printWriter.write(String.format("Draining %s partition %d to partition %d...\n", dataCenter, from, to));
    _eventStore.move(ChannelNames.getReplicationFanoutChannel(outboundDataCenter, from),
            ChannelNames.getReplicationFanoutChannel(outboundDataCenter, to));
    printWriter.write("Done!\n");
}
/**
 * Creates the managed inbound fanout for a remote data center.  Each partition's
 * event source reads from this (the local) data center's replication fanout
 * channel via the supplied replication source.
 */
@Override
public Managed newInboundReplicationFanout(DataCenter dataCenter, ReplicationSource replicationSource) {
    // getSelf() is looked up lazily, per partition, exactly as before.
    PartitionEventSourceSupplier supplier = partition -> {
        return new ReplicationEventSource(replicationSource,
                ChannelNames.getReplicationFanoutChannel(_dataCenters.getSelf(), partition));
    };
    return create("in-" + dataCenter.getName(), supplier, null,
            REMOTE_DC_SLEEP_WHEN_IDLE, _dataCenterFanoutPartitions);
}
@Override protected void runOneIteration() throws Exception { try { // Start replication for all new data centers. Map<String, Managed> active = Maps.newHashMap(_dataCenterFanout); DataCenter self = _dataCenters.getSelf(); for (DataCenter dataCenter : _dataCenters.getAll()) { if (dataCenter.equals(self)) { continue; } Managed fanout = active.remove(dataCenter.getName()); if (fanout == null) { fanout = newInboundReplication(dataCenter); try { fanout.start(); } catch (Exception e) { _log.error("Unexpected exception starting replication service: {}", dataCenter.getName()); continue; } _dataCenterFanout.put(dataCenter.getName(), fanout); } } // If a DataCenter has been removed, stop replicating from it. stopAll(active); } catch (Throwable t) { _log.error("Unexpected exception polling data center changes.", t); } }
/**
 * Creates the managed inbound fanout for a remote data center.  Each partition's
 * event source reads from this (the local) data center's replication fanout
 * channel via the supplied replication source.
 */
@Override
public Managed newInboundReplicationFanout(DataCenter dataCenter, ReplicationSource replicationSource) {
    // getSelf() is looked up lazily, per partition, exactly as before.
    PartitionEventSourceSupplier supplier = partition -> {
        return new ReplicationEventSource(replicationSource,
                ChannelNames.getReplicationFanoutChannel(_dataCenters.getSelf(), partition));
    };
    return create("in-" + dataCenter.getName(), supplier, null,
            REMOTE_DC_SLEEP_WHEN_IDLE, _dataCenterFanoutPartitions);
}
@Override protected void runOneIteration() throws Exception { try { // Start replication for all new data centers. Map<String, Managed> active = Maps.newHashMap(_dataCenterFanout); DataCenter self = _dataCenters.getSelf(); for (DataCenter dataCenter : _dataCenters.getAll()) { if (dataCenter.equals(self)) { continue; } Managed fanout = active.remove(dataCenter.getName()); if (fanout == null) { fanout = newInboundReplication(dataCenter); try { fanout.start(); } catch (Exception e) { _log.error("Unexpected exception starting replication service: {}", dataCenter.getName()); continue; } _dataCenterFanout.put(dataCenter.getName(), fanout); } } // If a DataCenter has been removed, stop replicating from it. stopAll(active); } catch (Throwable t) { _log.error("Unexpected exception polling data center changes.", t); } }
// NOTE(review): incomplete fragment — the enclosing method and the opening
// try block(s) are outside this view, so the braces below do not balance here.
// Records the scan's stash start time (with its placements and expiry) in the
// compaction-control store for this data center; on failure it logs and then
// best-effort deletes any partially written entry — presumably the delete is
// wrapped in its own inner try, which is not visible here; verify in the full file.
_compactionControlSource.updateStashTime(scanId, compactionControlTime, Lists.newArrayList(status.getOptions().getPlacements()), expireTime, _dataCenters.getSelf().getName()); } catch (Exception e) { _log.error("Failed to update the stash time for scan {}", scanId, e); _compactionControlSource.deleteStashTime(scanId, _dataCenters.getSelf().getName()); } catch (Exception ex) { _log.error("Failed to delete the stash time for scan {}", scanId, ex);
/**
 * Builds one {@link CompactionControlSource} per known data center: the local
 * in-process source for this data center, and a retrying service-pool proxy over
 * the remote service endpoint for every other data center.
 */
@Provides
@Singleton
@AllCompactionControlSources
public List<CompactionControlSource> getAllCompactionControlSources(@LocalCompactionControl CompactionControlSource localCompactionControlSource,
                                                                    @ServerCluster String serverCluster, Client client, DataCenters dataCenters,
                                                                    @CompControlApiKey String compControlApiKey, HealthCheckRegistry healthCheckRegistry,
                                                                    MetricRegistry metrics) {
    // Fix: the factory is identical for every data center, so build it once
    // instead of once per loop iteration (it was also constructed and discarded
    // for the self data center, where it is never used).
    MultiThreadedServiceFactory<CompactionControlSource> clientFactory =
            new CompactionControlClientFactory(serverCluster, new JerseyEmoClient(client), compControlApiKey);
    List<CompactionControlSource> compactionControlSources = Lists.newArrayList();
    for (DataCenter dataCenter : dataCenters.getAll()) {
        if (dataCenter.equals(dataCenters.getSelf())) {
            // Local data center: use the in-process source directly.
            compactionControlSources.add(localCompactionControlSource);
        } else {
            // Remote data center: proxy over its service URI with exponential-backoff retries.
            ServiceEndPoint endPoint = new ServiceEndPointBuilder()
                    .withServiceName(clientFactory.getServiceName())
                    .withId(dataCenter.getName())
                    .withPayload(new PayloadBuilder()
                            .withUrl(dataCenter.getServiceUri().resolve(DataStoreClient.SERVICE_PATH))
                            .withAdminUrl(dataCenter.getAdminUri())
                            .toString())
                    .build();
            compactionControlSources.add(ServicePoolBuilder.create(CompactionControlSource.class)
                    .withHostDiscovery(new FixedHostDiscovery(endPoint))
                    .withServiceFactory(clientFactory)
                    .withCachingPolicy(ServiceCachingPolicyBuilder.getMultiThreadedClientPolicy())
                    .withMetricRegistry(metrics)
                    .buildProxy(new ExponentialBackoffRetry(30, 1, 10, TimeUnit.SECONDS)));
        }
    }
    return compactionControlSources;
}
/**
 * Deletes every stash token-range row recorded for the given stash run in this
 * data center, writing at LOCAL_QUORUM consistency.
 */
public void clearTokenRanges(String stashId) {
    ensureStashTokenRangeTableExists();

    String localDataCenterName = _dataCenters.getSelf().getName();
    _placementCache.get(_systemTablePlacement)
            .getKeyspace()
            .getCqlSession()
            .execute(QueryBuilder.delete()
                    .from(STASH_TOKEN_RANGE_TABLE)
                    .where(QueryBuilder.eq(STASH_ID_COLUMN, stashId))
                    .and(QueryBuilder.eq(DATA_CENTER_COLUMN, localDataCenterName))
                    .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
}