/**
 * Marks the given source record as fully processed: acknowledges it to the
 * task, bumps the counter of records seen since the last commit, and stages
 * its source offset with the offset writer for the next flush.
 */
@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
  task.commitRecord(record);
  ++recordsSinceLastCommit;
  offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
void setOffsets(Stream<OffsetInfo> offsetInfoStream) { // Set offsets for each connector offsetInfoStream .collect(groupingBy(v -> v.connectorId)) .forEach( (connectorId, v) -> { logger.info("Writing offsets for " + connectorId); OffsetStorageWriter offsetWriter = new OffsetStorageWriter( offsetBackingStore, connectorId, internalConverter, internalConverter); v.forEach( offsetInfo -> { Map<String, Object> partition = TopicPartitionSerDe.asMap( new TopicPartition(offsetInfo.topic, offsetInfo.partition.intValue())); Map<String, Long> offset = MirusSourceTask.offsetMap(offsetInfo.offset); offsetWriter.offset(partition, offset); }); try { offsetBackingStore.start(); offsetWriter.beginFlush(); offsetWriter.doFlush(null).get(); offsetBackingStore.stop(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Unable to flush offsets for " + connectorId, e); } }); } }
private synchronized Record<byte[]> processSourceRecord(final SourceRecord srcRecord) { outstandingRecords.put(srcRecord, srcRecord); offsetWriter.offset(srcRecord.sourcePartition(), srcRecord.sourceOffset()); return new Record<byte[]>() { @Override
current._3().commitRecord(current._1()); offsetStorageWriter.offset(current._1().sourcePartition(), current._1().sourceOffset()); } catch (Throwable t) { LOGGER.warn("Unable to properly commit offset " + current._2(), t);
offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
/**
 * Persists a synthetic test offset for {@code connectorName} into the Kafka offset
 * backing store, mirroring the "file"/"pos" source position the embedded engine
 * would record, and flushes it synchronously.
 */
private void saveTestOffset()
    throws NoSuchFieldException, IllegalAccessException, InterruptedException,
        ExecutionException {
  EmbeddedEngine engine = createEmbeddedEngine();
  WorkerConfig config = getWorkerConfig(engine);
  KafkaOffsetBackingStore backingStore = createKafkaOffsetBackingStore(config);

  OffsetStorageWriter writer =
      new OffsetStorageWriter(
          backingStore, connectorName, getKeyConverter(engine), getValueConverter(engine));

  // Stage the source position, then flush; any flush error surfaces via get().
  writer.offset(
      ImmutableMap.of("server", "my-app-connector"),
      ImmutableMap.of("file", file, "pos", offset));
  writer.beginFlush();
  writer.doFlush((error, result) -> {}).get();
}