OffsetStorageWriter offsetWriter =
        new OffsetStorageWriter(offsetStore, engineName, keyConverter, valueConverter);
OffsetStorageReader offsetReader =
        new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
    task.commitRecord(record);
    recordsSinceLastCommit += 1;
    offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
long started = clock.currentTimeInMillis();
long timeout = started + commitTimeout.toMillis();
if (!offsetWriter.beginFlush()) return;
Future<Void> flush = offsetWriter.doFlush(this::completedFlush);
if (flush == null) return; // no offsets to commit ...
// Each failure path while waiting on the flush future (interrupt, execution error,
// or timeout) cancels the in-flight flush so the offsets can be retried later.
offsetWriter.cancelFlush();
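The elided middle of this method waits on the returned future before deciding whether to cancel. A minimal sketch of that wait, assuming the same clock, commitTimeout, and task fields used elsewhere in these snippets; the logger call and exact messages are illustrative:

try {
    // Wait only as long as the remaining share of the commit timeout allows.
    flush.get(Math.max(timeout - clock.currentTimeInMillis(), 0), TimeUnit.MILLISECONDS);
    // The offsets reached the backing store, so give the task its post-flush hook.
    task.commit();
} catch (InterruptedException | ExecutionException | TimeoutException e) {
    logger.warn("Flush of offsets failed or timed out, cancelling", e);
    offsetWriter.cancelFlush();
}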
void setOffsets(Stream<OffsetInfo> offsetInfoStream) {
    // Set offsets for each connector
    offsetInfoStream
        .collect(groupingBy(v -> v.connectorId))
        .forEach(
            (connectorId, v) -> {
                logger.info("Writing offsets for " + connectorId);
                OffsetStorageWriter offsetWriter =
                    new OffsetStorageWriter(
                        offsetBackingStore, connectorId, internalConverter, internalConverter);
                v.forEach(
                    offsetInfo -> {
                        Map<String, Object> partition =
                            TopicPartitionSerDe.asMap(
                                new TopicPartition(offsetInfo.topic, offsetInfo.partition.intValue()));
                        Map<String, Long> offset = MirusSourceTask.offsetMap(offsetInfo.offset);
                        offsetWriter.offset(partition, offset);
                    });
                try {
                    offsetBackingStore.start();
                    offsetWriter.beginFlush();
                    offsetWriter.doFlush(null).get();
                    offsetBackingStore.stop();
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException("Unable to flush offsets for " + connectorId, e);
                }
            });
}
/**
 * Confirms that data read up to {@code endOffset} has been successfully handled by the
 * streaming engine.
 *
 * @param endOffset the last offset read and committed by the spark engine.
 */
public void commit(Offset endOffset) {
    try {
        if (offsetStorageWriter.beginFlush()) {
            offsetStorageWriter.doFlush((error, result) -> {
                if (error == null) {
                    LOGGER.debug("Flushing till offset {} with result {}", endOffset, result);
                } else {
                    LOGGER.error("Unable to commit records till source offset " + endOffset, error);
                }
            }).get(30, TimeUnit.SECONDS);
        }
    } catch (Exception e) {
        LOGGER.error("Unable to commit records till source offset " + endOffset, e);
    }
}
/**
 * Handle completion of a write. Returns true if this callback is for the current flush
 * operation, false if it's for an old operation that should now be ignored.
 */
private synchronized boolean handleFinishWrite(long flushId, Throwable error, Void result) {
    // Callbacks need to be handled carefully since the flush operation may have already timed
    // out and been cancelled.
    if (flushId != currentFlushId)
        return false;

    if (error != null) {
        cancelFlush();
    } else {
        currentFlushId++;
        toFlush = null;
    }
    return true;
}
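For orientation, this method is driven from the callback the writer hands to the backing store: only results belonging to the current flush are propagated to the caller. A simplified sketch of that wiring, with offset serialization omitted and toFlushSerialized / flushCallback used as illustrative names:

public Future<Void> doFlush(final Callback<Void> flushCallback) {
    final long flushId = currentFlushId;
    // toFlushSerialized stands in for the snapshot serialized with the key/value converters.
    return backingStore.set(toFlushSerialized, (error, result) -> {
        // Drop stale callbacks: only a result for the current flush reaches the caller.
        boolean isCurrent = handleFinishWrite(flushId, error, result);
        if (isCurrent && flushCallback != null) {
            flushCallback.onCompletion(error, result);
        }
    });
}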
/**
 * Cancel a flush that has been initiated by {@link #beginFlush}. This should not be called if
 * {@link #doFlush} has already been invoked. It should be used if an operation performed
 * between beginFlush and doFlush failed.
 */
public synchronized void cancelFlush() {
    // Verify we're still flushing data to handle a race between cancelFlush() calls from up the
    // call stack and callbacks from the write request to underlying storage
    if (flushing()) {
        // Just recombine the data and place it back in the primary storage
        toFlush.putAll(data);
        data = toFlush;
        currentFlushId++;
        toFlush = null;
    }
}
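A minimal caller-side sketch of the pattern the javadoc describes, assuming a hypothetical prepareRecordsForDispatch() step that can fail between the two calls:

if (offsetWriter.beginFlush()) {
    try {
        // Hypothetical work performed between beginFlush() and doFlush(), e.g. serializing
        // or dispatching the records whose offsets are about to be flushed.
        prepareRecordsForDispatch();
        offsetWriter.doFlush((error, result) -> { /* handle completion */ });
    } catch (Exception e) {
        // The step between beginFlush() and doFlush() failed, so put the snapshotted
        // offsets back into the writer's primary buffer to be flushed on a later attempt.
        offsetWriter.cancelFlush();
    }
}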
private void saveTestOffset()
        throws NoSuchFieldException, IllegalAccessException, InterruptedException, ExecutionException {
    EmbeddedEngine embeddedEngine = createEmbeddedEngine();
    WorkerConfig workerConfig = getWorkerConfig(embeddedEngine);
    KafkaOffsetBackingStore kafkaOffsetBackingStore = createKafkaOffsetBackingStore(workerConfig);
    Converter keyConverter = getKeyConverter(embeddedEngine);
    Converter valueConverter = getValueConverter(embeddedEngine);
    OffsetStorageWriter offsetStorageWriter =
        new OffsetStorageWriter(kafkaOffsetBackingStore, connectorName, keyConverter, valueConverter);
    offsetStorageWriter.offset(
        ImmutableMap.of("server", "my-app-connector"),
        ImmutableMap.of("file", file, "pos", offset));
    offsetStorageWriter.beginFlush();
    offsetStorageWriter.doFlush((error, result) -> {}).get();
}
boolean flushStarted = offsetWriter.beginFlush();
Future<Void> flushFuture =
    offsetWriter.doFlush(new org.apache.kafka.connect.util.Callback<Void>() {
        @Override
        public void onCompletion(Throwable error, Void result) {
private synchronized void finishFailedFlush() {
    offsetWriter.cancelFlush();
    outstandingMessages.putAll(outstandingMessagesBacklog);
    outstandingMessagesBacklog.clear();
    flushing = false;
}
/**
 * Performs the first step of a flush operation, snapshotting the current state. This does not
 * actually initiate the flush with the underlying storage.
 *
 * @return true if a flush was initiated, false if no data was available
 */
public synchronized boolean beginFlush() {
    if (flushing()) {
        log.error("Invalid call to OffsetStorageWriter flush() while already flushing, the "
                + "framework should not allow this");
        throw new ConnectException("OffsetStorageWriter is already flushing");
    }

    if (data.isEmpty())
        return false;

    assert !flushing();
    toFlush = data;
    data = new HashMap<>();
    return true;
}
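To illustrate the contract above, a short sketch of a well-behaved caller, assuming partition and offset maps built elsewhere; the five-second wait is an arbitrary illustrative timeout:

offsetWriter.offset(partition, offset);              // buffer at least one offset first
if (offsetWriter.beginFlush()) {                     // false means there was nothing to flush
    // Calling beginFlush() again before this flush completes would throw ConnectException.
    Future<Void> flush = offsetWriter.doFlush((error, result) -> { /* log the outcome */ });
    try {
        flush.get(5, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        // Handle or log the failed flush here.
    }
}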
private synchronized Record<byte[]> processSourceRecord(final SourceRecord srcRecord) {
    outstandingRecords.put(srcRecord, srcRecord);
    offsetWriter.offset(srcRecord.sourcePartition(), srcRecord.sourceOffset());
    return new Record<byte[]>() {
        @Override
        valueConverter);
offsetWriter = new OffsetStorageWriter(
        offsetStore,
        "pulsar-kafka-connect-adaptor",
    current._3().commitRecord(current._1());
    offsetStorageWriter.offset(current._1().sourcePartition(), current._1().sourceOffset());
} catch (Throwable t) {
    LOGGER.warn("Unable to properly commit offset " + current._2(), t);
sharedSourceTaskContext =
    new SharedSourceTaskContext(
        new OffsetStorageReaderImpl(
            offsetBackingStore, connectorClass.getCanonicalName(), internalConverter, internalConverter),
        new OffsetStorageWriter(
            offsetBackingStore, connectorClass.getCanonicalName(), internalConverter, internalConverter));
offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
OffsetStorageReader offsetReader =
    new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
OffsetStorageWriter offsetWriter =
    new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);