return taskConfigs; } finally { previousLogContext.restore();
/**
 * Run the supplied function in the temporary connector MDC context, and when complete always return the MDC context to its
 * state before this method was called.
 *
 * @param connectorType the type of connector; may not be null
 * @param connectorName the logical name of the connector; may not be null
 * @param contextName the name of the context; may not be null
 * @param operation the function to run in the new MDC context; may not be null
 * @throws IllegalArgumentException if any of the parameters are null
 */
public static void temporarilyForConnector(String connectorType, String connectorName, String contextName, Runnable operation) {
    // Validate all arguments up front so no MDC state is touched on bad input.
    if (connectorType == null) {
        throw new IllegalArgumentException("The MDC value for the connector type may not be null");
    }
    if (connectorName == null) {
        throw new IllegalArgumentException("The MDC value for the connector name may not be null");
    }
    if (contextName == null) {
        throw new IllegalArgumentException("The MDC value for the connector context may not be null");
    }
    if (operation == null) {
        throw new IllegalArgumentException("The operation may not be null");
    }
    // Snapshot the caller's MDC state BEFORE mutating it, so the finally block
    // can always put it back even if the operation throws.
    PreviousContext callerMdcState = new PreviousContext();
    try {
        forConnector(connectorType, connectorName, contextName);
        operation.run();
    }
    finally {
        callerMdcState.restore();
    }
}
throw new ConnectException(e); } finally { previousContext.restore();
/**
 * Starts streaming changes from the replication connection on a background thread.
 * <p>
 * Resumes from the last recorded LSN in the stored offset when one exists; otherwise
 * streams from the server's current position. Synchronized so start/stop transitions
 * are serialized. Runs inside the task's logging context, which is restored on exit.
 *
 * @param eventConsumer consumer of change events produced by the stream
 * @param failureConsumer consumer notified of streaming failures
 */
@Override
protected synchronized void start(BlockingConsumer<ChangeEvent> eventConsumer, Consumer<Throwable> failureConsumer) {
    LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
    try {
        // If stop() already ran, the executor is shut down; do not begin streaming.
        if (executorService.isShutdown()) {
            logger.info("Streaming will not start, stop already requested");
            return;
        }
        if (sourceInfo.hasLastKnownPosition()) {
            // start streaming from the last recorded position in the offset
            Long lsn = sourceInfo.lsn();
            if (logger.isDebugEnabled()) {
                logger.debug("retrieved latest position from stored offset '{}'", ReplicationConnection.format(lsn));
            }
            // compareAndSet so an already-open stream (if any) is never replaced
            replicationStream.compareAndSet(null, replicationConnection.startStreaming(lsn));
        }
        else {
            logger.info("no previous LSN found in Kafka, streaming from the latest xlogpos or flushed LSN...");
            replicationStream.compareAndSet(null, replicationConnection.startStreaming());
        }
        // refresh the schema so we have a latest view of the DB tables
        taskContext.refreshSchema(true);
        this.lastCompletelyProcessedLsn = sourceInfo.lsn();
        // the new thread will inherit its parent MDC
        executorService.submit(() -> streamChanges(eventConsumer, failureConsumer));
    }
    catch (Throwable t) {
        // Unwrap the root cause when one is attached before surfacing to Connect.
        throw new ConnectException(t.getCause() != null ? t.getCause() : t);
    }
    finally {
        previousContext.restore();
    }
}
/** * Returns the next batch of elements from this queue. May be empty in case no * elements have arrived in the maximum waiting time. * * @throws InterruptedException * if this thread has been interrupted while waiting for more * elements to arrive */ public List<T> poll() throws InterruptedException { LoggingContext.PreviousContext previousContext = loggingContextSupplier.get(); try { LOGGER.debug("polling records..."); List<T> records = new ArrayList<>(); final Timer timeout = Threads.timer(Clock.SYSTEM, Temporals.max(pollInterval, ConfigurationDefaults.RETURN_CONTROL_INTERVAL)); while (!timeout.expired() && queue.drainTo(records, maxBatchSize) == 0) { throwProducerFailureIfPresent(); LOGGER.debug("no records available yet, sleeping a bit..."); // no records yet, so wait a bit metronome.pause(); LOGGER.debug("checking for more records..."); } return records; } finally { previousContext.restore(); } }
@Override protected synchronized void commit(long lsn) { LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME); try { ReplicationStream replicationStream = this.replicationStream.get(); if (replicationStream != null) { if (logger.isDebugEnabled()) { logger.debug("Flushing LSN to server: {}", LogSequenceNumber.valueOf(lsn)); } // tell the server the point up to which we've processed data, so it can be free to recycle WAL segments replicationStream.flushLsn(lsn); } else { logger.debug("Streaming has already stopped, ignoring commit callback..."); } } catch (SQLException e) { throw new ConnectException(e); } finally { previousContext.restore(); } }
/**
 * Run the supplied function in the temporary connector MDC context, and when complete always return the MDC context to its
 * state before this method was called.
 *
 * @param connectorType the type of connector; may not be null
 * @param connectorName the logical name of the connector; may not be null
 * @param contextName the name of the context; may not be null
 * @param operation the function to run in the new MDC context; may not be null
 * @throws IllegalArgumentException if any of the parameters are null
 */
public static void temporarilyForConnector(String connectorType, String connectorName, String contextName, Runnable operation) {
    if (connectorType == null) throw new IllegalArgumentException("The MDC value for the connector type may not be null");
    if (connectorName == null) throw new IllegalArgumentException("The MDC value for the connector name may not be null");
    if (contextName == null) throw new IllegalArgumentException("The MDC value for the connector context may not be null");
    if (operation == null) throw new IllegalArgumentException("The operation may not be null");
    // Capture the caller's MDC state before it is mutated, so it can be restored
    // in the finally block regardless of whether the operation throws.
    PreviousContext previous = new PreviousContext();
    try {
        forConnector(connectorType, connectorName, contextName);
        operation.run();
    } finally {
        previous.restore();
    }
}
@Override protected void start(BlockingConsumer<ChangeEvent> eventConsumer, Consumer<Throwable> failureConsumer) { // MDC should be in inherited from parent to child threads LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME); try { CompletableFuture.runAsync(this::delaySnapshotIfNeeded, executorService) .thenRun(() -> this.takeSnapshot(eventConsumer)) .thenRun(() -> this.startStreaming(eventConsumer, failureConsumer)) .exceptionally(e -> { logger.error("unexpected exception", e.getCause() != null ? e.getCause() : e); // always stop to clean up data stop(); failureConsumer.accept(e); return null; }); } finally { previousContext.restore(); } }
/**
 * Configure for a connector the logger's Mapped Diagnostic Context (MDC) properties for the thread making this call.
 *
 * @param connectorType the type of connector; may not be null
 * @param connectorName the name of the connector; may not be null
 * @param contextName the name of the context; may not be null
 * @return the previous MDC context; never null
 * @throws IllegalArgumentException if any of the parameters are null
 */
public static PreviousContext forConnector(String connectorType, String connectorName, String contextName) {
    if (connectorType == null) {
        throw new IllegalArgumentException("The MDC value for the connector type may not be null");
    }
    if (connectorName == null) {
        throw new IllegalArgumentException("The MDC value for the connector name may not be null");
    }
    if (contextName == null) {
        throw new IllegalArgumentException("The MDC value for the connector context may not be null");
    }
    // Take the snapshot of the existing MDC entries first, then overwrite them.
    final PreviousContext snapshot = new PreviousContext();
    MDC.put(CONNECTOR_TYPE, connectorType);
    MDC.put(CONNECTOR_NAME, connectorName);
    MDC.put(CONNECTOR_CONTEXT, contextName);
    return snapshot;
}
/**
 * Stops streaming and releases resources. Idempotent: only the first call
 * performs cleanup; subsequent calls are logged and ignored. The replication
 * stream reference is cleared and the executor shut down even if cleanup fails.
 */
@Override
protected synchronized void stop() {
    LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
    try {
        // Flip the flag atomically; a false result means another call got here first.
        boolean firstInvocation = cleanupExecuted.compareAndSet(false, true);
        if (!firstInvocation) {
            logger.debug("already stopped....");
            return;
        }
        closeConnections();
    }
    finally {
        replicationStream.set(null);
        executorService.shutdownNow();
        previousContext.restore();
    }
}
prevLoggingContext.restore();
logger.info("Successfully started MongoDB connector task with {} thread(s) for replica sets {}", numThreads, replicaSets); } finally { previousLogContext.restore();
throw new ConnectException(e); } finally { previousContext.restore();
@Override public void start(Map<String, String> props) { // Validate the configuration ... final Configuration config = Configuration.from(props); if (!config.validateAndRecord(MongoDbConnectorConfig.ALL_FIELDS, logger::error)) { throw new ConnectException("Error configuring an instance of " + getClass().getSimpleName() + "; check the logs for details"); } this.config = config; // Set up the replication context ... taskContext = new MongoDbTaskContext(config); this.connectionContext = taskContext.getConnectionContext(); PreviousContext previousLogContext = taskContext.configureLoggingContext("conn"); try { logger.info("Starting MongoDB connector and discovering replica set(s) at {}", connectionContext.hosts()); // Set up and start the thread that monitors the members of all of the replica sets ... replicaSetMonitorExecutor = Threads.newSingleThreadExecutor(MongoDbConnector.class, taskContext.serverName(), "replica-set-monitor"); ReplicaSetDiscovery monitor = new ReplicaSetDiscovery(taskContext); monitorThread = new ReplicaSetMonitorThread(monitor::getReplicaSets, connectionContext.pollPeriodInSeconds(), TimeUnit.SECONDS, Clock.SYSTEM, () -> taskContext.configureLoggingContext("disc"), this::replicaSetsChanged); replicaSetMonitorExecutor.execute(monitorThread); logger.info("Successfully started MongoDB connector, and continuing to discover changes in replica sets", connectionContext.hosts()); } finally { previousLogContext.restore(); } }
/**
 * Logs, per replica set, a one-line summary (record count and last offset) of
 * the records just sent. Does nothing when the batch is empty or INFO logging
 * is disabled. Reuses and clears {@code summaryByReplicaSet} on each call.
 *
 * @param records the records that were sent; may be empty
 */
@Override
public void accept(List<SourceRecord> records) {
    if (records.isEmpty()) {
        return;
    }
    if (!logger.isInfoEnabled()) {
        // Summaries are only ever logged at INFO; skip the bookkeeping entirely.
        return;
    }
    summaryByReplicaSet.clear();
    for (SourceRecord record : records) {
        String replicaSetName = SourceInfo.replicaSetNameForPartition(record.sourcePartition());
        if (replicaSetName != null) {
            summaryByReplicaSet.computeIfAbsent(replicaSetName, name -> new ReplicaSetSummary()).add(record);
        }
    }
    if (summaryByReplicaSet.isEmpty()) {
        return;
    }
    PreviousContext prevContext = taskContext.configureLoggingContext("task");
    try {
        summaryByReplicaSet.forEach((rsName, summary) -> logger.info("{} records sent for replica set '{}', last offset: {}",
                summary.recordCount(), rsName, summary.lastOffset()));
    }
    finally {
        prevContext.restore();
    }
}
}
/**
 * Returns the next batch of elements from this queue. May be empty in case no
 * elements have arrived in the maximum waiting time.
 *
 * @throws InterruptedException
 *             if this thread has been interrupted while waiting for more
 *             elements to arrive
 */
public List<T> poll() throws InterruptedException {
    LoggingContext.PreviousContext previousContext = loggingContextSupplier.get();
    try {
        LOGGER.debug("polling records...");
        List<T> records = new ArrayList<>();
        // Cap the wait so control is returned to the caller periodically even when idle.
        final Timer timeout = Threads.timer(Clock.SYSTEM, Temporals.max(pollInterval, ConfigurationDefaults.RETURN_CONTROL_INTERVAL));
        // Loop until either the timeout expires or at least one element was drained;
        // drainTo only runs when the timer has not yet expired (short-circuit).
        while (!timeout.expired() && queue.drainTo(records, maxBatchSize) == 0) {
            // Re-throw any failure recorded by the producer thread before waiting again.
            throwProducerFailureIfPresent();
            LOGGER.debug("no records available yet, sleeping a bit...");
            // no records yet, so wait a bit
            metronome.pause();
            LOGGER.debug("checking for more records...");
        }
        return records;
    } finally {
        previousContext.restore();
    }
}
/**
 * Starts streaming changes from the replication connection on a background thread.
 * <p>
 * Resumes from the last recorded LSN in the stored offset when one exists; otherwise
 * streams from the server's current position. Synchronized so start/stop transitions
 * are serialized. Runs inside the task's logging context, which is restored on exit.
 *
 * @param eventConsumer consumer of change events produced by the stream
 * @param failureConsumer consumer notified of streaming failures
 */
@Override
protected synchronized void start(BlockingConsumer<ChangeEvent> eventConsumer, Consumer<Throwable> failureConsumer) {
    LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
    try {
        // If stop() already ran, the executor is shut down; do not begin streaming.
        if (executorService.isShutdown()) {
            logger.info("Streaming will not start, stop already requested");
            return;
        }
        if (sourceInfo.hasLastKnownPosition()) {
            // start streaming from the last recorded position in the offset
            Long lsn = sourceInfo.lsn();
            if (logger.isDebugEnabled()) {
                logger.debug("retrieved latest position from stored offset '{}'", ReplicationConnection.format(lsn));
            }
            // compareAndSet so an already-open stream (if any) is never replaced
            replicationStream.compareAndSet(null, replicationConnection.startStreaming(lsn));
        }
        else {
            logger.info("no previous LSN found in Kafka, streaming from the latest xlogpos or flushed LSN...");
            replicationStream.compareAndSet(null, replicationConnection.startStreaming());
        }
        // refresh the schema so we have a latest view of the DB tables
        taskContext.refreshSchema(true);
        this.lastCompletelyProcessedLsn = sourceInfo.lsn();
        // the new thread will inherit its parent MDC
        executorService.submit(() -> streamChanges(eventConsumer, failureConsumer));
    }
    catch (Throwable t) {
        // Unwrap the root cause when one is attached before surfacing to Connect.
        throw new ConnectException(t.getCause() != null ? t.getCause() : t);
    }
    finally {
        previousContext.restore();
    }
}
@Override protected synchronized void commit(long lsn) { LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME); try { ReplicationStream replicationStream = this.replicationStream.get(); if (replicationStream != null) { if (logger.isDebugEnabled()) { logger.debug("Flushing LSN to server: {}", LogSequenceNumber.valueOf(lsn)); } // tell the server the point up to which we've processed data, so it can be free to recycle WAL segments replicationStream.flushLsn(lsn); } else { logger.debug("Streaming has already stopped, ignoring commit callback..."); } } catch (SQLException e) { throw new ConnectException(e); } finally { previousContext.restore(); } }