/**
 * Builds a {@link ChangeEventQueue} from the values configured on this builder.
 *
 * @return a new queue using this builder's poll interval, queue size, batch size
 *         and logging-context supplier
 */
public ChangeEventQueue<T> build() {
    // Diamond operator: T is inferred from the builder's type parameter.
    return new ChangeEventQueue<>(pollInterval, maxQueueSize, maxBatchSize, loggingContextSupplier);
}
}
/**
 * Wraps the given heartbeat record in a {@link DataChangeEvent} and places it on the queue.
 *
 * @param record the heartbeat source record to enqueue
 * @throws InterruptedException if interrupted while waiting for queue capacity
 */
private void enqueueHeartbeat(SourceRecord record) throws InterruptedException {
    final DataChangeEvent heartbeatEvent = new DataChangeEvent(record);
    queue.enqueue(heartbeatEvent);
}
/**
 * Records a failure raised on the producer side. Every call is logged, but only the
 * first throwable wins the compare-and-set: that one is propagated to the queue (so
 * consumers see it on their next poll) and the failure callback is scheduled once.
 *
 * @param producerThrowable the error raised by the producer thread
 */
public void setProducerThrowable(Throwable producerThrowable) {
    LOGGER.error("Producer failure", producerThrowable);
    final boolean isFirstFailure = this.producerThrowable.compareAndSet(null, producerThrowable);
    if (isFirstFailure) {
        queue.producerFailure(producerThrowable);
        executor.execute(() -> onThrowable.run());
    }
}
// Build the change-event queue from connector configuration: poll interval, batch
// size and queue size are all user-configurable; the logging-context supplier lets
// poll() restore this connector's MDC context on the consuming thread.
this.queue = new ChangeEventQueue.Builder<SourceRecord>()
        .pollInterval(connectorConfig.getPollInterval())
        .maxBatchSize(connectorConfig.getMaxBatchSize())
        .maxQueueSize(connectorConfig.getMaxQueueSize())
        .loggingContextSupplier(this::getLoggingContext)
        .build();
/**
 * Polls the next batch of change events, remembers the LSN of the last completely
 * processed event in the batch, and converts the events to Kafka Connect records.
 *
 * @return the next batch of source records; may be empty
 * @throws InterruptedException if interrupted while waiting for events
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    final List<ChangeEvent> events = changeEventQueue.poll();
    // Use isEmpty() rather than size() > 0; only update the LSN when a batch arrived.
    if (!events.isEmpty()) {
        lastCompletelyProcessedLsn = events.get(events.size() - 1).getLastCompletelyProcessedLsn();
    }
    return events.stream().map(ChangeEvent::getRecord).collect(Collectors.toList());
}
/**
 * Returns the next batch of elements from this queue. May be empty in case no
 * elements have arrived in the maximum waiting time.
 *
 * @throws InterruptedException
 *             if this thread has been interrupted while waiting for more
 *             elements to arrive
 */
public List<T> poll() throws InterruptedException {
    // Restore this connector's logging (MDC) context for the duration of the poll.
    LoggingContext.PreviousContext previousContext = loggingContextSupplier.get();
    try {
        LOGGER.debug("polling records...");
        List<T> records = new ArrayList<>();
        // Wait at most the larger of pollInterval and RETURN_CONTROL_INTERVAL, so
        // control returns to the caller periodically even when the queue stays empty.
        final Timer timeout = Threads.timer(Clock.SYSTEM, Temporals.max(pollInterval, ConfigurationDefaults.RETURN_CONTROL_INTERVAL));
        while (!timeout.expired() && queue.drainTo(records, maxBatchSize) == 0) {
            // Re-raise any producer-side failure instead of spinning until timeout.
            throwProducerFailureIfPresent();
            LOGGER.debug("no records available yet, sleeping a bit...");
            // no records yet, so wait a bit
            metronome.pause();
            LOGGER.debug("checking for more records...");
        }
        return records;
    }
    finally {
        previousContext.restore();
    }
}
/**
 * Delegates to the queue metrics' remaining-capacity value.
 *
 * @return how many more elements the change-event queue can currently accept
 */
@Override
public int getQueueRemainingCapacity() {
    return changeEventQueueMetrics.remainingCapacity();
}
}
/**
 * Delegates to the queue metrics' total-capacity value.
 *
 * @return the configured total capacity of the change-event queue
 */
@Override
public int getQueueTotalCapacity() {
    return changeEventQueueMetrics.totalCapacity();
}
// Build the change-event queue from connector configuration; the logging-context
// supplier scopes log output on the polling thread to this connector's context.
changeEventQueue = new ChangeEventQueue.Builder<ChangeEvent>()
        .pollInterval(connectorConfig.getPollInterval())
        .maxBatchSize(connectorConfig.getMaxBatchSize())
        .maxQueueSize(connectorConfig.getMaxQueueSize())
        .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
        .build();
/**
 * Drains the next batch of data change events, converts them to Kafka Connect
 * records and keeps the source offset of the newest record in the batch.
 *
 * @return the polled source records; may be empty
 * @throws InterruptedException if interrupted while polling the queue
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    final List<SourceRecord> sourceRecords = queue.poll()
            .stream()
            .map(DataChangeEvent::getRecord)
            .collect(Collectors.toList());
    if (!sourceRecords.isEmpty()) {
        // Track the offset of the last record for later offset commits.
        this.lastOffset = sourceRecords.get(sourceRecords.size() - 1).sourceOffset();
    }
    return sourceRecords;
}
/**
 * Builds a {@link ChangeEventQueue} from the values configured on this builder.
 *
 * @return a new queue using this builder's poll interval, queue size, batch size
 *         and logging-context supplier
 */
public ChangeEventQueue<T> build() {
    // Diamond operator: T is inferred from the builder's type parameter.
    return new ChangeEventQueue<>(pollInterval, maxQueueSize, maxBatchSize, loggingContextSupplier);
}
}
/**
 * Publishes a heartbeat record to the queue, wrapped as a {@link DataChangeEvent}.
 *
 * @param record the heartbeat source record
 * @throws InterruptedException if interrupted while enqueueing
 */
private void enqueueHeartbeat(SourceRecord record) throws InterruptedException {
    queue.enqueue(new DataChangeEvent(record));
}
/**
 * Reports a producer failure. The throwable is always logged, but the queue and the
 * failure callback are notified only for the first failure (guarded by a CAS on the
 * stored throwable), so repeated reports are ignored after the first.
 *
 * @param producerThrowable the error raised by the producer thread
 */
public void setProducerThrowable(Throwable producerThrowable) {
    LOGGER.error("Producer failure", producerThrowable);
    if (this.producerThrowable.compareAndSet(null, producerThrowable)) {
        queue.producerFailure(producerThrowable);
        executor.execute(() -> onThrowable.run());
    }
}
/**
 * Returns the next batch of elements from this queue. May be empty in case no
 * elements have arrived in the maximum waiting time.
 *
 * @throws InterruptedException
 *             if this thread has been interrupted while waiting for more
 *             elements to arrive
 */
public List<T> poll() throws InterruptedException {
    // Restore this connector's logging (MDC) context for the duration of the poll.
    LoggingContext.PreviousContext previousContext = loggingContextSupplier.get();
    try {
        LOGGER.debug("polling records...");
        List<T> records = new ArrayList<>();
        // Wait at most the larger of pollInterval and RETURN_CONTROL_INTERVAL, so
        // control returns to the caller periodically even when the queue stays empty.
        final Timer timeout = Threads.timer(Clock.SYSTEM, Temporals.max(pollInterval, ConfigurationDefaults.RETURN_CONTROL_INTERVAL));
        while (!timeout.expired() && queue.drainTo(records, maxBatchSize) == 0) {
            // Re-raise any producer-side failure instead of spinning until timeout.
            throwProducerFailureIfPresent();
            LOGGER.debug("no records available yet, sleeping a bit...");
            // no records yet, so wait a bit
            metronome.pause();
            LOGGER.debug("checking for more records...");
        }
        return records;
    }
    finally {
        previousContext.restore();
    }
}
// Build the change-event queue from connector configuration; the logging-context
// supplier scopes log output on the polling thread to this connector's context.
this.queue = new ChangeEventQueue.Builder<DataChangeEvent>()
        .pollInterval(connectorConfig.getPollInterval())
        .maxBatchSize(connectorConfig.getMaxBatchSize())
        .maxQueueSize(connectorConfig.getMaxQueueSize())
        .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
        .build();
/**
 * Polls the next batch of source records, failing fast if any replicator has
 * previously reported an error, and feeds the batch to the record summarizer.
 *
 * @return the next batch of records
 * @throws InterruptedException if interrupted while polling
 * @throws ConnectException if a replicator failure has been recorded
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    if (replicatorError != null) {
        throw new ConnectException("Failing connector task, at least one of the replicators has failed");
    }
    final List<SourceRecord> batch = queue.poll();
    recordSummarizer.accept(batch);
    return batch;
}
// Build the change-event queue from connector configuration; the logging-context
// supplier scopes log output on the polling thread to this connector's context.
this.queue = new ChangeEventQueue.Builder<DataChangeEvent>()
        .pollInterval(connectorConfig.getPollInterval())
        .maxBatchSize(connectorConfig.getMaxBatchSize())
        .maxQueueSize(connectorConfig.getMaxQueueSize())
        .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
        .build();
/**
 * Polls the queue for the next batch of data change events, maps each event to its
 * Kafka Connect record and remembers the source offset of the final record.
 *
 * @return the polled source records; may be empty
 * @throws InterruptedException if interrupted while polling the queue
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    final List<DataChangeEvent> batch = queue.poll();
    final List<SourceRecord> sourceRecords = batch.stream()
            .map(DataChangeEvent::getRecord)
            .collect(Collectors.toList());
    if (!sourceRecords.isEmpty()) {
        // Track the offset of the last record for later offset commits.
        final SourceRecord newest = sourceRecords.get(sourceRecords.size() - 1);
        this.lastOffset = newest.sourceOffset();
    }
    return sourceRecords;
}
// Build the change-event queue from connector configuration; the logging-context
// supplier scopes log output on the polling thread to this connector's context.
changeEventQueue = new ChangeEventQueue.Builder<ChangeEvent>()
        .pollInterval(connectorConfig.getPollInterval())
        .maxBatchSize(connectorConfig.getMaxBatchSize())
        .maxQueueSize(connectorConfig.getMaxQueueSize())
        .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
        .build();
/**
 * Polls the next batch of change events, remembers the LSN of the last completely
 * processed event in the batch, and converts the events to Kafka Connect records.
 *
 * @return the next batch of source records; may be empty
 * @throws InterruptedException if interrupted while waiting for events
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    final List<ChangeEvent> events = changeEventQueue.poll();
    // Use isEmpty() rather than size() > 0; only update the LSN when a batch arrived.
    if (!events.isEmpty()) {
        lastCompletelyProcessedLsn = events.get(events.size() - 1).getLastCompletelyProcessedLsn();
    }
    return events.stream().map(ChangeEvent::getRecord).collect(Collectors.toList());
}