@Override
public void resumeConnector(String connector) {
    // Resuming an unknown connector is a client error, not a silent no-op.
    boolean known = configBackingStore.contains(connector);
    if (!known) {
        throw new NotFoundException("Unknown connector " + connector);
    }
    // Persisting the STARTED target state drives the actual resume through the
    // config backing store's update notifications.
    configBackingStore.putTargetState(connector, TargetState.STARTED);
}
private void removeConnectorTasks(String connName) {
    // Nothing to do when the connector has no tasks recorded in the snapshot.
    Collection<ConnectorTaskId> taskIds = configState.tasks(connName);
    if (taskIds.isEmpty()) {
        return;
    }
    // Stop (and wait for) the tasks before removing their configs from the store.
    worker.stopAndAwaitTasks(taskIds);
    configBackingStore.removeTaskConfigs(connName);
}
@Override
public void onConnectorConfigUpdate(String connector) {
    // TODO: move connector configuration update handling here to be consistent with
    // the semantics of the config backing store

    // Refresh the cached config snapshot under the herder lock so concurrent
    // readers always observe a consistent view of connector state.
    synchronized (StandaloneHerder.this) {
        configState = configBackingStore.snapshot();
    }
}
/**
 * Builds an embedded Connect runtime: offset/status/config backing stores, a worker,
 * and a distributed herder, without starting a REST server.
 *
 * @param workerConfig     worker-level configuration, converted to a DistributedConfig
 * @param connectorConfigs connector configurations to be submitted once started
 * @throws Exception if any component fails to construct or configure
 */
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time time = new SystemTime();
    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // not sure if this is going to work but because we don't have advertised url we can get at least a fairly random
    String workerId = UUID.randomUUID().toString();
    worker = new Worker(workerId, time, config, offsetBackingStore);

    // Status and config stores share the worker's internal value converter.
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    // advertisedUrl = "" as we don't have the rest server - hopefully this will not break anything
    herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, "");
    this.connectorConfigs = connectorConfigs;

    shutdownHook = new ShutdownHook();
}
/**
 * Try to read to the end of the config log within the given timeout.
 *
 * @param timeoutMs maximum time to wait to sync to the end of the log
 * @return true if successful, false if timed out
 */
private boolean readConfigToEnd(long timeoutMs) {
    log.info("Current config state offset {} is behind group assignment {}, reading to end of config log",
            configState.offset(), assignment.offset());
    try {
        // Block until the backing store has consumed the config topic up to its end,
        // then take a fresh snapshot reflecting everything read.
        configBackingStore.refresh(timeoutMs, TimeUnit.MILLISECONDS);
        configState = configBackingStore.snapshot();
        log.info("Finished reading to end of log and updated config snapshot, new config log offset: {}",
                configState.offset());
        return true;
    } catch (TimeoutException e) {
        // in case reading the log takes too long, leave the group to ensure a quick rebalance
        // (although by default we should be out of the group already)
        // and back off to avoid a tight loop of rejoin-attempt-to-catch-up-leave
        log.warn("Didn't reach end of config log quickly enough", e);
        member.maybeLeaveGroup();
        backoff(workerUnsyncBackoffMs);
        return false;
    }
}
private void updateConnectorTasks(String connName) {
    // Reconfiguration only applies to connectors the worker is actually running.
    if (!worker.isRunning(connName)) {
        log.info("Skipping reconfiguration of connector {} since it is not running", connName);
        return;
    }

    // Only bounce the tasks when the recomputed configs differ from what is stored.
    List<Map<String, String>> updatedConfigs = recomputeTaskConfigs(connName);
    List<Map<String, String>> currentConfigs = configState.allTaskConfigs(connName);
    if (updatedConfigs.equals(currentConfigs)) {
        return;
    }

    removeConnectorTasks(connName);
    List<Map<String, String>> rawConfigs = reverseTransform(connName, configState, updatedConfigs);
    configBackingStore.putTaskConfigs(connName, rawConfigs);
    createConnectorTasks(connName, configState.targetState(connName));
}
@Override
public Void call() throws Exception {
    log.trace("Handling connector config request {}", connName);
    // Config removals must be performed by the group leader; redirect otherwise.
    if (!isLeader()) {
        callback.onCompletion(new NotLeaderException("Only the leader can set connector configs.", leaderUrl()), null);
        return null;
    }
    // Unknown connector: report the error through the callback rather than throwing.
    if (!configState.contains(connName)) {
        callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
        return null;
    }
    log.trace("Removing connector config {} {}", connName, configState.connectors());
    configBackingStore.removeConnectorConfig(connName);
    callback.onCompletion(null, new Created<ConnectorInfo>(false, null));
    return null;
}
},
@Override
public Void call() throws Exception {
    // Surface validation errors through the callback before doing anything else.
    if (maybeAddConfigErrors(validateConnectorConfig(config), callback)) {
        return null;
    }

    log.trace("Handling connector config request {}", connName);
    // Config writes must go through the group leader; redirect otherwise.
    if (!isLeader()) {
        callback.onCompletion(new NotLeaderException("Only the leader can set connector configs.", leaderUrl()), null);
        return null;
    }
    boolean exists = configState.contains(connName);
    if (!allowReplace && exists) {
        callback.onCompletion(new AlreadyExistsException("Connector " + connName + " already exists"), null);
        return null;
    }

    log.trace("Submitting connector config {} {} {}", connName, allowReplace, configState.connectors());
    configBackingStore.putConnectorConfig(connName, config);

    // Note that we use the updated connector config despite the fact that we don't have an updated
    // snapshot yet. The existing task info should still be accurate.
    Map<String, String> map = configState.connectorConfig(connName);
    ConnectorInfo info = new ConnectorInfo(connName, config, configState.tasks(connName),
            map == null ? null : connectorTypeForClass(map.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG)));
    callback.onCompletion(null, new Created<>(!exists, info));
    return null;
}
},
DistributedHerder(DistributedConfig config, Worker worker, String workerId, String kafkaClusterId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, WorkerGroupMember member, String restUrl, ConnectMetrics metrics, Time time) { super(worker, workerId, kafkaClusterId, statusBackingStore, configBackingStore); this.time = time; this.herderMetrics = new HerderMetrics(metrics); this.workerGroupId = config.getString(DistributedConfig.GROUP_ID_CONFIG); this.workerSyncTimeoutMs = config.getInt(DistributedConfig.WORKER_SYNC_TIMEOUT_MS_CONFIG); this.workerTasksShutdownTimeoutMs = config.getLong(DistributedConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG); this.workerUnsyncBackoffMs = config.getInt(DistributedConfig.WORKER_UNSYNC_BACKOFF_MS_CONFIG); this.member = member != null ? member : new WorkerGroupMember(config, restUrl, this.configBackingStore, new RebalanceListener(), time); this.herderExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingDeque<Runnable>(1), new ThreadFactory() { @Override public Thread newThread(Runnable herder) { return new Thread(herder, "DistributedHerder"); } }); this.forwardRequestExecutor = Executors.newSingleThreadExecutor(); this.startAndStopExecutor = Executors.newFixedThreadPool(START_STOP_THREAD_POOL_SIZE); this.config = config;
/**
 * Wires up a distributed Connect worker: REST server, backing stores, worker,
 * and herder, then attempts to start the assembled Connect instance.
 *
 * @param workerProps worker-level configuration properties
 */
public KafkaConnectDistributed(Map<String, String> workerProps) {
    Time time = new SystemTime();
    ConnectorFactory connectorFactory = new ConnectorFactory();
    DistributedConfig config = new DistributedConfig(workerProps);

    rest = new RestServer(config);
    // The advertised URL doubles as the worker's identity within the group.
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    Worker worker = new Worker(workerId, time, connectorFactory, config, offsetBackingStore);

    // Status and config stores share the worker's internal value converter.
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    DistributedHerder herder = new DistributedHerder(config, time, worker,
            statusBackingStore, configBackingStore, advertisedUrl.toString());
    connect = new Connect(herder, rest);
    try {
        connect.start();
    } catch (Throwable t) {
        // Fix: the message previously read "Failed to start Connector error", which was garbled.
        LOGGER.error("Failed to start Connect", t);
        rest.stop();
        connect.stop();
    }
}
@Override
public Void call() throws Exception {
    // Task config writes are leader-only; redirect non-leader requests.
    if (!isLeader()) {
        callback.onCompletion(new NotLeaderException("Only the leader may write task configurations.", leaderUrl()), null);
        return null;
    }
    // The connector must already be registered before its tasks can be configured.
    if (!configState.contains(connName)) {
        callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
        return null;
    }
    configBackingStore.putTaskConfigs(connName, configs);
    callback.onCompletion(null, null);
    return null;
}
},
@Override public synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback) { try { if (!configState.contains(connName)) { // Deletion, must already exist callback.onCompletion(new NotFoundException("Connector " + connName + " not found", null), null); return; } removeConnectorTasks(connName); worker.stopConnector(connName); configBackingStore.removeConnectorConfig(connName); onDeletion(connName); callback.onCompletion(null, new Created<ConnectorInfo>(false, null)); } catch (ConnectException e) { callback.onCompletion(e, null); } }
@Override
public synchronized void putConnectorConfig(String connName, final Map<String, String> config,
                                            boolean allowReplace,
                                            final Callback<Created<ConnectorInfo>> callback) {
    try {
        // Reject invalid configs up front; errors are delivered through the callback.
        if (maybeAddConfigErrors(validateConnectorConfig(config), callback)) {
            return;
        }

        boolean created = false;
        if (configState.contains(connName)) {
            // Replacing an existing connector requires explicit permission.
            if (!allowReplace) {
                callback.onCompletion(new AlreadyExistsException("Connector " + connName + " already exists"), null);
                return;
            }
            // Stop the old instance before swapping in the new config.
            worker.stopConnector(connName);
        } else {
            created = true;
        }

        configBackingStore.putConnectorConfig(connName, config);

        if (!startConnector(connName)) {
            callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
            return;
        }

        // Reconfigure tasks to match the (possibly changed) connector config.
        updateConnectorTasks(connName);
        callback.onCompletion(null, new Created<>(created, createConnectorInfo(connName)));
    } catch (ConnectException e) {
        callback.onCompletion(e, null);
    }
}
@Override
public void pauseConnector(String connector) {
    // Pausing an unknown connector is a client error, not a silent no-op.
    boolean known = configBackingStore.contains(connector);
    if (!known) {
        throw new NotFoundException("Unknown connector " + connector);
    }
    // Persisting the PAUSED target state drives the actual pause through the
    // config backing store's update notifications.
    configBackingStore.putTargetState(connector, TargetState.PAUSED);
}
@Override
public void onTaskConfigUpdate(Collection<ConnectorTaskId> tasks) {
    // Refresh the cached config snapshot under the herder lock so concurrent
    // readers always observe a consistent view of task configurations.
    synchronized (StandaloneHerder.this) {
        configState = configBackingStore.snapshot();
    }
}
List<Map<String, String>> rawTaskProps = reverseTransform(connName, configState, taskProps); if (isLeader()) { configBackingStore.putTaskConfigs(connName, rawTaskProps); cb.onCompletion(null, null); } else {
@Override
public void onConnectorConfigRemove(String connector) {
    // Refresh the cached config snapshot under the herder lock so the removed
    // connector disappears from the view seen by concurrent readers.
    synchronized (StandaloneHerder.this) {
        configState = configBackingStore.snapshot();
    }
}
private void updateDeletedConnectorStatus() {
    ClusterConfigState snapshot = configBackingStore.snapshot();
    Set<String> liveConnectors = snapshot.connectors();
    // Purge status entries for any connector no longer present in the config store.
    for (String candidate : statusBackingStore.connectors()) {
        if (liveConnectors.contains(candidate)) {
            continue;
        }
        log.debug("Cleaning status information for connector {}", candidate);
        onDeletion(candidate);
    }
}
@Override
public void onConnectorTargetStateChange(String connector) {
    synchronized (StandaloneHerder.this) {
        // Re-read the snapshot so the target state we act on matches the store.
        configState = configBackingStore.snapshot();
        TargetState targetState = configState.targetState(connector);
        worker.setTargetState(connector, targetState);
        // On resume, reconfigure the connector's tasks so they restart with
        // up-to-date configurations.
        if (targetState == TargetState.STARTED)
            updateConnectorTasks(connector);
    }
}
}
@Override
public List<ProtocolMetadata> metadata() {
    // Take a fresh snapshot so the config offset advertised in the join metadata
    // reflects the latest state this worker has read.
    configSnapshot = configStorage.snapshot();
    ConnectProtocol.WorkerState state = new ConnectProtocol.WorkerState(restUrl, configSnapshot.offset());
    ByteBuffer serialized = ConnectProtocol.serializeMetadata(state);
    ProtocolMetadata protocol = new ProtocolMetadata(DEFAULT_SUBPROTOCOL, serialized);
    return Collections.singletonList(protocol);
}