/**
 * Purges status entries for connectors that are no longer present in the
 * config topic snapshot, so deleted connectors don't linger in status queries.
 */
private void updateDeletedConnectorStatus() {
    ClusterConfigState snapshot = configBackingStore.snapshot();
    Set<String> configuredConnectors = snapshot.connectors();
    for (String statusConnector : statusBackingStore.connectors()) {
        // Anything tracked by the status store but absent from the config is deleted.
        if (configuredConnectors.contains(statusConnector))
            continue;
        log.debug("Cleaning status information for connector {}", statusConnector);
        onDeletion(statusConnector);
    }
}
statusBackingStore.flush(); log.info("Finished stopping tasks in preparation for rebalance"); } else {
/**
 * Marks a deleted connector and all of its tasks as DESTROYED in the status store.
 */
@Override
public void onDeletion(String connector) {
    // Tombstone every task first, then the connector entry itself.
    for (TaskStatus taskStatus : statusBackingStore.getAll(connector)) {
        statusBackingStore.put(new TaskStatus(taskStatus.id(), TaskStatus.State.DESTROYED, workerId, generation()));
    }
    ConnectorStatus destroyed =
            new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation());
    statusBackingStore.put(destroyed);
}
/**
 * Builds an embedded Connect worker and distributed herder from the given
 * worker properties, without starting a REST server.
 *
 * @param workerConfig     distributed worker configuration properties
 * @param connectorConfigs connector configurations to be applied once started
 * @throws Exception if any component fails to configure
 */
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time clock = new SystemTime();
    DistributedConfig distributedConfig = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetStore = new KafkaOffsetBackingStore();
    offsetStore.configure(distributedConfig);

    // No REST server means no advertised URL to derive an id from, so fall
    // back to a random UUID as a (reasonably) unique worker id.
    String embeddedWorkerId = UUID.randomUUID().toString();
    worker = new Worker(embeddedWorkerId, clock, distributedConfig, offsetStore);

    StatusBackingStore statusStore = new KafkaStatusBackingStore(clock, worker.getInternalValueConverter());
    statusStore.configure(distributedConfig);

    ConfigBackingStore configStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configStore.configure(distributedConfig);

    // Empty advertised URL since there is no REST server in embedded mode —
    // presumably tolerated by the herder; verify nothing downstream requires it.
    herder = new DistributedHerder(distributedConfig, clock, worker, statusStore, configStore, "");
    this.connectorConfigs = connectorConfigs;

    shutdownHook = new ShutdownHook();
}
/**
 * Records that a connector has started running on this worker.
 */
@Override
public void onStartup(String connector) {
    ConnectorStatus running =
            new ConnectorStatus(connector, ConnectorStatus.State.RUNNING, workerId, generation());
    statusBackingStore.put(running);
}
/**
 * Builds the full state view of a connector: its own state plus the sorted
 * states of all its tasks and its resolved connector type.
 *
 * @param connName name of the connector to look up
 * @return aggregated connector and task state information
 * @throws NotFoundException if no status is recorded for the connector
 */
@Override
public ConnectorStateInfo connectorStatus(String connName) {
    ConnectorStatus connectorStatus = statusBackingStore.get(connName);
    if (connectorStatus == null)
        throw new NotFoundException("No status found for connector " + connName);

    ConnectorStateInfo.ConnectorState connectorState = new ConnectorStateInfo.ConnectorState(
            connectorStatus.state().toString(), connectorStatus.workerId(), connectorStatus.trace());

    List<ConnectorStateInfo.TaskState> taskStates = new ArrayList<>();
    for (TaskStatus taskStatus : statusBackingStore.getAll(connName)) {
        taskStates.add(new ConnectorStateInfo.TaskState(taskStatus.id().task(),
                taskStatus.state().toString(), taskStatus.workerId(), taskStatus.trace()));
    }
    // Present tasks in their natural (task id) order.
    Collections.sort(taskStates);

    Map<String, String> conf = config(connName);
    // Without a config we cannot resolve the connector class, so the type is unknown.
    ConnectorType type = conf == null
            ? ConnectorType.UNKNOWN
            : connectorTypeForClass(conf.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG));
    return new ConnectorStateInfo(connName, connectorState, taskStates, type);
}
/**
 * Records that a connector was shut down and is now unassigned.
 */
@Override
public void onShutdown(String connector) {
    ConnectorStatus unassigned =
            new ConnectorStatus(connector, ConnectorStatus.State.UNASSIGNED, workerId, generation());
    // putSafe rather than put — presumably so a newer status written by the
    // connector's next owner is not clobbered; verify against store semantics.
    statusBackingStore.putSafe(unassigned);
}
/**
 * Looks up the recorded state of a single task.
 *
 * @param id the connector task id
 * @return the task's state, owning worker, and error trace (if any)
 * @throws NotFoundException if no status is recorded for the task
 */
@Override
public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) {
    TaskStatus status = statusBackingStore.get(id);
    if (status == null) {
        throw new NotFoundException("No status found for task " + id);
    }
    return new ConnectorStateInfo.TaskState(
            id.task(), status.state().toString(), status.workerId(), status.trace());
}
/**
 * Bootstraps a distributed Connect worker: constructs the REST server, worker,
 * backing stores and herder from the given properties, then starts everything
 * immediately.
 *
 * @param workerProps distributed worker configuration as a string map
 */
public KafkaConnectDistributed(Map<String, String> workerProps) {
    Time time = new SystemTime();
    ConnectorFactory connectorFactory = new ConnectorFactory();
    DistributedConfig config = new DistributedConfig(workerProps);

    rest = new RestServer(config);
    // The advertised host:port doubles as this worker's id within the group.
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    Worker worker = new Worker(workerId, time, connectorFactory, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    DistributedHerder herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, advertisedUrl.toString());
    connect = new Connect(herder, rest);
    try {
        connect.start();
    } catch (Throwable t) {
        // NOTE(review): a startup failure is logged and cleaned up but NOT rethrown,
        // so construction completes "successfully" with a stopped instance — confirm
        // this best-effort behavior is intentional rather than a swallowed error.
        LOGGER.error("Failed to start Connector error", t);
        rest.stop();
        connect.stop();
    }
}
/**
 * Records that a paused connector has resumed and is running again.
 */
@Override
public void onResume(String connector) {
    // Use ConnectorStatus.State (not TaskStatus.State) for consistency with the
    // other connector-level callbacks (onStartup/onPause/onShutdown/onFailure).
    statusBackingStore.put(
            new ConnectorStatus(connector, ConnectorStatus.State.RUNNING, workerId, generation()));
}
/**
 * Records that a task was shut down and is now unassigned.
 */
@Override
public void onShutdown(ConnectorTaskId id) {
    TaskStatus unassigned = new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation());
    // putSafe rather than put — presumably so a newer status written by the
    // task's next owner is not clobbered; verify against store semantics.
    statusBackingStore.putSafe(unassigned);
}
statusBackingStore.configure(config);
/**
 * Records that a task has started running on this worker.
 */
@Override
public void onStartup(ConnectorTaskId id) {
    TaskStatus running = new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation());
    statusBackingStore.put(running);
}
/**
 * Records that a connector has failed, capturing the stack trace of the cause.
 */
@Override
public void onFailure(String connector, Throwable cause) {
    ConnectorStatus failed = new ConnectorStatus(
            connector, ConnectorStatus.State.FAILED, trace(cause), workerId, generation());
    // putSafe so a more recent status from another worker is not overwritten —
    // presumably; verify against store semantics.
    statusBackingStore.putSafe(failed);
}
statusBackingStore.configure(configWithClientIdSuffix(workerProps, "status"));
/**
 * Records that a task has been paused.
 */
@Override
public void onPause(ConnectorTaskId id) {
    TaskStatus paused = new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation());
    statusBackingStore.put(paused);
}
/**
 * Records that a task has failed, capturing the stack trace of the cause.
 */
@Override
public void onFailure(ConnectorTaskId id, Throwable cause) {
    TaskStatus failed = new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause));
    // putSafe so a more recent status from another worker is not overwritten —
    // presumably; verify against store semantics.
    statusBackingStore.putSafe(failed);
}
/**
 * Records that a connector has been paused.
 */
@Override
public void onPause(String connector) {
    ConnectorStatus paused =
            new ConnectorStatus(connector, ConnectorStatus.State.PAUSED, workerId, generation());
    statusBackingStore.put(paused);
}
/**
 * Records that a paused task has resumed and is running again.
 */
@Override
public void onResume(ConnectorTaskId id) {
    TaskStatus running = new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation());
    statusBackingStore.put(running);
}