long timeout = started + commitTimeout.toMillis(); if (!offsetWriter.beginFlush()) return; Future<Void> flush = offsetWriter.doFlush(this::completedFlush); if (flush == null) return; // no offsets to commit ...
/**
 * Confirms that data read since offset endOffset has been successfully handled by the
 * streaming engine, by flushing the pending source offsets to the backing store.
 *
 * <p>Waits up to 30 seconds for the flush to complete. On timeout or failure the in-flight
 * flush is cancelled so a subsequent call can begin a new one — otherwise the next
 * {@code beginFlush()} on this writer would fail because a flush is still outstanding.
 *
 * @param endOffset the last offset read and committed by the spark engine.
 */
public void commit(Offset endOffset) {
  try {
    if (offsetStorageWriter.beginFlush()) {
      offsetStorageWriter
          .doFlush(
              (error, result) -> {
                if (error == null) {
                  LOGGER.debug("Flushing till offset {} with result {}", endOffset, result);
                } else {
                  LOGGER.error("Unable to commit records till source offset " + endOffset, error);
                }
              })
          .get(30, TimeUnit.SECONDS);
    }
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers can observe the interruption.
    Thread.currentThread().interrupt();
    // Unwind the in-flight flush so the next beginFlush() does not fail.
    offsetStorageWriter.cancelFlush();
    LOGGER.error("Unable to commit records till source offset " + endOffset, e);
  } catch (Exception e) {
    // Timeout or execution failure: the writer still considers a flush in progress;
    // cancel it so offsets are retried on the next commit.
    offsetStorageWriter.cancelFlush();
    LOGGER.error("Unable to commit records till source offset " + endOffset, e);
  }
}
void setOffsets(Stream<OffsetInfo> offsetInfoStream) { // Set offsets for each connector offsetInfoStream .collect(groupingBy(v -> v.connectorId)) .forEach( (connectorId, v) -> { logger.info("Writing offsets for " + connectorId); OffsetStorageWriter offsetWriter = new OffsetStorageWriter( offsetBackingStore, connectorId, internalConverter, internalConverter); v.forEach( offsetInfo -> { Map<String, Object> partition = TopicPartitionSerDe.asMap( new TopicPartition(offsetInfo.topic, offsetInfo.partition.intValue())); Map<String, Long> offset = MirusSourceTask.offsetMap(offsetInfo.offset); offsetWriter.offset(partition, offset); }); try { offsetBackingStore.start(); offsetWriter.beginFlush(); offsetWriter.doFlush(null).get(); offsetBackingStore.stop(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Unable to flush offsets for " + connectorId, e); } }); } }
/**
 * Stages a single test offset for {@code connectorName} and flushes it to a Kafka-backed
 * offset store built from the embedded engine's worker configuration.
 *
 * <p>Blocks until the flush future completes; the flush callback deliberately ignores the
 * result because assertions happen elsewhere in the test.
 */
private void saveTestOffset()
    throws NoSuchFieldException, IllegalAccessException, InterruptedException,
        ExecutionException {
  EmbeddedEngine engine = createEmbeddedEngine();
  WorkerConfig config = getWorkerConfig(engine);
  KafkaOffsetBackingStore backingStore = createKafkaOffsetBackingStore(config);
  Converter keys = getKeyConverter(engine);
  Converter values = getValueConverter(engine);

  OffsetStorageWriter writer =
      new OffsetStorageWriter(backingStore, connectorName, keys, values);
  writer.offset(
      ImmutableMap.of("server", "my-app-connector"),
      ImmutableMap.of("file", file, "pos", offset));

  writer.beginFlush();
  writer.doFlush((error, result) -> {}).get();
}
Future<Void> flushFuture = offsetWriter.doFlush(new org.apache.kafka.connect.util.Callback<Void>() { @Override public void onCompletion(Throwable error, Void result) {