/**
 * Creates a coordinator that wires together the change event source factory, the error
 * handler and the event dispatcher, and allocates a dedicated single-threaded executor
 * (named "change-event-source-coordinator") on which the sources will run.
 *
 * @param previousOffset offset context restored from a previous run, if any
 * @param errorHandler handler notified of failures raised while running the sources
 * @param connectorType connector class, used to name the executor thread
 * @param logicalName logical connector name, used to name the executor thread
 * @param changeEventSourceFactory factory producing the snapshot/streaming sources
 * @param eventDispatcher dispatcher receiving emitted change events
 */
public ChangeEventSourceCoordinator(OffsetContext previousOffset, ErrorHandler errorHandler,
        Class<? extends SourceConnector> connectorType, String logicalName,
        ChangeEventSourceFactory changeEventSourceFactory, EventDispatcher<?> eventDispatcher) {
    this.previousOffset = previousOffset;
    this.errorHandler = errorHandler;
    this.changeEventSourceFactory = changeEventSourceFactory;
    this.eventDispatcher = eventDispatcher;
    this.executor = Threads.newSingleThreadExecutor(connectorType, logicalName, "change-event-source-coordinator");
}
public ErrorHandler(Class<? extends SourceConnector> connectorType, String logicalName, ChangeEventQueue<?> queue, Runnable onThrowable) { this.queue = queue; this.onThrowable = onThrowable; this.executor = Threads.newSingleThreadExecutor(connectorType, logicalName, "error-handler"); this.producerThrowable = new AtomicReference<>(); }
/**
 * Starts the snapshot asynchronously and returns immediately. Once started, the records
 * read from the database can be retrieved using {@link #poll()} until that method
 * returns {@code null}.
 */
@Override
protected void doStart() {
    final String logicalName = context.getConnectorConfig().getLogicalName();
    executorService = Threads.newSingleThreadExecutor(MySqlConnector.class, logicalName, "snapshot");
    executorService.execute(() -> execute());
}
/**
 * Creates a snapshot producer for the given task context.
 *
 * @param taskContext a {@link PostgresTaskContext}, never null
 * @param sourceInfo a {@link SourceInfo} instance to track stored offsets
 * @param continueStreamingAfterCompletion whether streaming should continue once the
 *        snapshot completes; if so a stream producer is created eagerly
 */
public RecordsSnapshotProducer(PostgresTaskContext taskContext, SourceInfo sourceInfo,
        boolean continueStreamingAfterCompletion) {
    super(taskContext, sourceInfo);
    executorService = Threads.newSingleThreadExecutor(PostgresConnector.class,
            taskContext.config().getLogicalName(), CONTEXT_NAME);
    currentRecord = new AtomicReference<>();
    // The stream producer must be created here (not lazily) so that its replication
    // connection already exists; otherwise changes happening while the snapshot is
    // taking place could not be streamed back.
    streamProducer = continueStreamingAfterCompletion
            ? Optional.of(new RecordsStreamProducer(taskContext, sourceInfo))
            : Optional.empty();
}
@Override public void start(Map<String, String> props) { // Validate the configuration ... final Configuration config = Configuration.from(props); if (!config.validateAndRecord(MongoDbConnectorConfig.ALL_FIELDS, logger::error)) { throw new ConnectException("Error configuring an instance of " + getClass().getSimpleName() + "; check the logs for details"); } this.config = config; // Set up the replication context ... taskContext = new MongoDbTaskContext(config); this.connectionContext = taskContext.getConnectionContext(); PreviousContext previousLogContext = taskContext.configureLoggingContext("conn"); try { logger.info("Starting MongoDB connector and discovering replica set(s) at {}", connectionContext.hosts()); // Set up and start the thread that monitors the members of all of the replica sets ... replicaSetMonitorExecutor = Threads.newSingleThreadExecutor(MongoDbConnector.class, taskContext.serverName(), "replica-set-monitor"); ReplicaSetDiscovery monitor = new ReplicaSetDiscovery(taskContext); monitorThread = new ReplicaSetMonitorThread(monitor::getReplicaSets, connectionContext.pollPeriodInSeconds(), TimeUnit.SECONDS, Clock.SYSTEM, () -> taskContext.configureLoggingContext("disc"), this::replicaSetsChanged); replicaSetMonitorExecutor.execute(monitorThread); logger.info("Successfully started MongoDB connector, and continuing to discover changes in replica sets", connectionContext.hosts()); } finally { previousLogContext.restore(); } }
/**
 * Creates new producer instance for the given task context.
 *
 * @param taskContext a {@link PostgresTaskContext}, never null
 * @param sourceInfo a {@link SourceInfo} instance to track stored offsets
 * @throws ConnectException if the replication connection cannot be created
 */
public RecordsStreamProducer(PostgresTaskContext taskContext, SourceInfo sourceInfo) {
    super(taskContext, sourceInfo);
    this.replicationStream = new AtomicReference<>();
    // Fix: create the replication connection BEFORE allocating the executor. In the
    // original order a SQLException thrown here aborted the constructor after the
    // executor had already been created, leaking its thread pool (never shut down).
    try {
        this.replicationConnection = taskContext.createReplicationConnection();
    }
    catch (SQLException e) {
        throw new ConnectException(e);
    }
    executorService = Threads.newSingleThreadExecutor(PostgresConnector.class,
            taskContext.config().getLogicalName(), CONTEXT_NAME);
    heartbeat = Heartbeat.create(taskContext.config().getConfig(),
            taskContext.topicSelector().getHeartbeatTopic(), taskContext.config().getLogicalName());
}
/**
 * Creates a coordinator that wires together the change event source factory, the error
 * handler and the event dispatcher, and allocates a dedicated single-threaded executor
 * (named "change-event-source-coordinator") on which the sources will run.
 *
 * @param previousOffset offset context restored from a previous run, if any
 * @param errorHandler handler notified of failures raised while running the sources
 * @param connectorType connector class, used to name the executor thread
 * @param logicalName logical connector name, used to name the executor thread
 * @param changeEventSourceFactory factory producing the snapshot/streaming sources
 * @param eventDispatcher dispatcher receiving emitted change events
 */
public ChangeEventSourceCoordinator(OffsetContext previousOffset, ErrorHandler errorHandler,
        Class<? extends SourceConnector> connectorType, String logicalName,
        ChangeEventSourceFactory changeEventSourceFactory, EventDispatcher<?> eventDispatcher) {
    this.previousOffset = previousOffset;
    this.errorHandler = errorHandler;
    this.changeEventSourceFactory = changeEventSourceFactory;
    this.eventDispatcher = eventDispatcher;
    this.executor = Threads.newSingleThreadExecutor(connectorType, logicalName, "change-event-source-coordinator");
}
public ErrorHandler(Class<? extends SourceConnector> connectorType, String logicalName, ChangeEventQueue<?> queue, Runnable onThrowable) { this.queue = queue; this.onThrowable = onThrowable; this.executor = Threads.newSingleThreadExecutor(connectorType, logicalName, "error-handler"); this.producerThrowable = new AtomicReference<>(); }
/**
 * Creates a snapshot producer for the given task context.
 *
 * @param taskContext a {@link PostgresTaskContext}, never null
 * @param sourceInfo a {@link SourceInfo} instance to track stored offsets
 * @param continueStreamingAfterCompletion whether streaming should continue once the
 *        snapshot completes; if so a stream producer is created eagerly
 */
public RecordsSnapshotProducer(PostgresTaskContext taskContext, SourceInfo sourceInfo,
        boolean continueStreamingAfterCompletion) {
    super(taskContext, sourceInfo);
    executorService = Threads.newSingleThreadExecutor(PostgresConnector.class,
            taskContext.config().getLogicalName(), CONTEXT_NAME);
    currentRecord = new AtomicReference<>();
    // The stream producer must be created here (not lazily) so that its replication
    // connection already exists; otherwise changes happening while the snapshot is
    // taking place could not be streamed back.
    streamProducer = continueStreamingAfterCompletion
            ? Optional.of(new RecordsStreamProducer(taskContext, sourceInfo))
            : Optional.empty();
}
/**
 * Creates new producer instance for the given task context.
 *
 * @param taskContext a {@link PostgresTaskContext}, never null
 * @param sourceInfo a {@link SourceInfo} instance to track stored offsets
 * @throws ConnectException if the replication connection cannot be created
 */
public RecordsStreamProducer(PostgresTaskContext taskContext, SourceInfo sourceInfo) {
    super(taskContext, sourceInfo);
    this.replicationStream = new AtomicReference<>();
    // Fix: create the replication connection BEFORE allocating the executor. In the
    // original order a SQLException thrown here aborted the constructor after the
    // executor had already been created, leaking its thread pool (never shut down).
    try {
        this.replicationConnection = taskContext.createReplicationConnection();
    }
    catch (SQLException e) {
        throw new ConnectException(e);
    }
    executorService = Threads.newSingleThreadExecutor(PostgresConnector.class,
            taskContext.config().getLogicalName(), CONTEXT_NAME);
    heartbeat = Heartbeat.create(taskContext.config().getConfig(),
            taskContext.topicSelector().getHeartbeatTopic(), taskContext.config().getLogicalName());
}