/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
@Override
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    Map<String, ?> sourcePartition = loader.getPartition();
    // Batched read of a single partition's committed offset; null when nothing was stored.
    Map<String, Object> storedOffset = context.offsetStorageReader()
            .offsets(Collections.singleton(sourcePartition))
            .get(sourcePartition);
    if (storedOffset == null) {
        return null;
    }
    OffsetContext restored = loader.load(storedOffset);
    LOGGER.info("Found previous offset {}", restored);
    return restored;
}
/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    Map<String, ?> partitionKey = loader.getPartition();
    Map<String, Object> saved = context.offsetStorageReader()
            .offsets(Collections.singleton(partitionKey))
            .get(partitionKey);
    if (saved != null) {
        OffsetContext loaded = loader.load(saved);
        LOGGER.info("Found previous offset {}", loaded);
        return loaded;
    }
    // No committed offset: the caller starts from scratch.
    return null;
}
}
// Seed each source with its last committed position: one batched offset lookup for all
// partitions, then hand every (partition -> offset) pair to the source.
context.offsetStorageReader().offsets(partitions).forEach(source::setOffsetFor);
// Restore the last committed offset for this partition; null means a fresh start, in
// which case resume from id 0.
// NOTE(review): the cast assumes the stored "id" value is a boxed Long — confirm
// against the code that writes these offsets.
Map<String, ?> lastOffset = context.offsetStorageReader().offset(partition);
long lastId = lastOffset == null ? 0L : (Long) lastOffset.get("id");
// Look up any previously committed offset for this source partition (null on first run).
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
// Switch logging to this connector's context for the duration of the block below.
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {
boolean snapshotEventsAreInserts = true;
// Partition key identifying this server's offsets in the offset store.
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
// Translate the stored offset (if any) into a restart position; null means no prior state.
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
if (offsets != null) {
/**
 * Loads the current saved offsets.
 */
private void loadOffsets() {
    List<Map<String, String>> partitions = new ArrayList<>();
    // One partition key per replicated database, all under the "mongodb" key.
    for (String database : databases) {
        partitions.add(Collections.singletonMap("mongodb", database));
    }
    // Single batched read of every partition's committed offset.
    offsets.putAll(context.offsetStorageReader().offsets(partitions));
}
}
/**
 * Starts the task: parses the configuration, restores the sequence number from any
 * previously committed offset, and initializes the throughput throttler.
 *
 * @param props connector configuration (name, id, topic, throughput)
 * @throws ConnectException if a numeric configuration value cannot be parsed
 */
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        // Keep the parse failure as the cause so the bad value is visible in logs.
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }

    partition = Collections.singletonMap(ID_FIELD, id);
    // Resume one past the last recorded sequence number, or from 0 on a fresh start.
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
/**
 * Starts the task: parses the configuration (with a default partition count of 1),
 * restores the sequence number from any previously committed offset, and initializes
 * the throughput throttler and message counter.
 *
 * @param props connector configuration
 * @throws ConnectException if a numeric configuration value cannot be parsed
 */
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        // getOrDefault replaces the containsKey/ternary dance with the same semantics.
        partitionCount = Integer.parseInt(props.getOrDefault(PARTITION_COUNT_CONFIG, "1"));
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }

    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    // Resume one past the last recorded sequence number, or from 0 on a fresh start.
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;
    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
private void initializeLastVariables(){ Map<String, Object> lastSourceOffset = null; lastSourceOffset = context.offsetStorageReader().offset(sourcePartition()); if( lastSourceOffset == null){ // we haven't fetched anything yet, so we initialize to 7 days ago nextQuerySince = config.getSince(); lastIssueNumber = -1; } else { Object updatedAt = lastSourceOffset.get(UPDATED_AT_FIELD); Object issueNumber = lastSourceOffset.get(NUMBER_FIELD); Object nextPage = lastSourceOffset.get(NEXT_PAGE_FIELD); if(updatedAt != null && (updatedAt instanceof String)){ nextQuerySince = Instant.parse((String) updatedAt); } if(issueNumber != null && (issueNumber instanceof String)){ lastIssueNumber = Integer.valueOf((String) issueNumber); } if (nextPage != null && (nextPage instanceof String)){ nextPageToVisit = Integer.valueOf((String) nextPage); } } }
/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    final Map<String, ?> partition = loader.getPartition();
    final Map<String, Object> previous =
            context.offsetStorageReader().offsets(Collections.singleton(partition)).get(partition);
    if (previous == null) {
        // Nothing stored for this partition — caller treats this as a clean start.
        return null;
    }
    final OffsetContext restoredContext = loader.load(previous);
    LOGGER.info("Found previous offset {}", restoredContext);
    return restoredContext;
}
}
partitionIds.stream().map(TopicPartitionSerDe::asMap).collect(Collectors.toList());
// Batched lookup of every partition's committed offset; nothing to restore when the
// reader reports no offsets at all.
Map<Map<String, Object>, Map<String, Object>> offsets = context.offsetStorageReader().offsets(partitionMaps);
if (offsets == null) {
    return;
// Build the offset-storage adapter over this task's offset reader for the given job
// URLs and partitions. NOTE(review): the class name suggests read-your-writes
// semantics (offsets written this run visible to later reads) — confirm in its source.
storageAdapter = new ReadYourWritesOffsetStorageAdapter(context.offsetStorageReader(), jobUrls, partitions);
@Override public List<SourceRecord> poll() throws InterruptedException { while (stop != null && !stop.get() && !policy.hasEnded()) { log.trace("Polling for new data"); final List<SourceRecord> results = new ArrayList<>(); List<FileMetadata> files = filesToProcess(); files.forEach(metadata -> { try (FileReader reader = policy.offer(metadata, context.offsetStorageReader())) { log.info("Processing records for file {}", metadata); while (reader.hasNext()) { results.add(convert(metadata, reader.currentOffset(), reader.next())); } } catch (ConnectException | IOException e) { //when an exception happens reading a file, the connector continues log.error("Error reading file from FS: " + metadata.getPath() + ". Keep going...", e); } }); return results; } return null; }
/**
 * Starts the task's background service and blocks until it reports healthy.
 *
 * @param map connector configuration, forwarded to the superclass
 * @throws ConnectException if the service is not healthy within 60 seconds
 */
@Override
public void start(Map<String, String> map) {
    super.start(map);
    this.serviceManager = new ServiceManager(
            Arrays.asList(service(this, this.context.offsetStorageReader())));
    log.info("Starting Services");
    this.serviceManager.startAsync();
    try {
        this.serviceManager.awaitHealthy(60, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        throw new ConnectException("Timeout while starting service.", e);
    }
}
// Test helper: stub the task context so any offset lookup reports no stored offset,
// simulating a connector that starts with a clean slate.
private void expectOffsetLookupReturnNone() {
    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    // Raw Map.class matcher: whatever partition key is passed, return null (no offset).
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject(Map.class))).andReturn(null);
}
}
/**
 * Starts the task's background service and waits for it to become healthy.
 *
 * @param map connector configuration, forwarded to the superclass
 * @throws ConnectException if the service does not become healthy within 60 seconds
 */
@Override
public void start(Map<String, String> map) {
    super.start(map);
    Service offsetAwareService = service(this, this.context.offsetStorageReader());
    this.serviceManager = new ServiceManager(Arrays.asList(offsetAwareService));
    log.info("Starting Services");
    this.serviceManager.startAsync();
    try {
        // Give the service up to a minute to report healthy before failing the task.
        this.serviceManager.awaitHealthy(60, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        throw new ConnectException("Timeout while starting service.", e);
    }
}
// Tail of a stream that maps leader topic-partitions to offset-lookup keys.
leaderTopicPartition.toTopicPartitionString()))
        .collect(Collectors.toList());
// Batch-read the committed offsets, then keep only well-formed entries that actually
// carry a topic-partition key (the filter continues past this fragment).
Map<String, Long> topicPartitionStringsOffsets = context.offsetStorageReader().offsets(offsetLookupPartitions)
        .entrySet().stream()
        .filter(e -> e != null && e.getKey() != null && e.getKey().get(TOPIC_PARTITION_KEY) != null
// Close the PL/SQL block that starts LogMiner with the assembled option list.
logMinerStartScr=logMinerStartScr+logMinerOptions+") \n; end;";
logMinerStartStmt=dbConn.prepareCall(logMinerStartScr);
// Fetch the last committed offset for this database; both stream SCN positions default
// to 0 (start from the beginning) until a stored offset is applied.
Map<String,Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(LOG_MINER_OFFSET_FIELD, dbName));
streamOffsetScn=0L;
streamOffsetCommitScn=0L;
try {
    stream = Files.newInputStream(Paths.get(filename));
    // Look up the last committed read position for this file (null on first run).
    Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
    if (offset != null) {
        Object lastRecordedOffset = offset.get(POSITION_FIELD);