// Look up the last committed offset for this source partition; on the very
// first run nothing has been committed yet and the reader returns null, in
// which case we fall back to id 0.
// NOTE(review): assumes the stored "id" value was written as a Long — confirm
// against the code that builds the offset map, or a ClassCastException results.
Map<String, ?> lastOffset = context.offsetStorageReader().offset(partition);
long lastId = lastOffset == null ? 0L : (Long) lastOffset.get("id");
// Fetch any previously committed offset for this connector's partition; a null
// result means no prior run has committed anything (fresh start).
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
// Install this connector's MDC logging context; the returned handle presumably
// restores the previous context in a finally block outside this fragment — confirm.
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {
// Until the stored offset says otherwise, snapshot records are emitted as inserts.
boolean snapshotEventsAreInserts = true;
// Partition key identifying this logical server in Kafka Connect offset storage.
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
// Translate the stored offset (if any) into a restart position; null means no
// prior offset exists and the branch below does not run.
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
if (offsets != null) {
/**
 * Starts the task: parses its configuration, restores the sequence number
 * from the last committed offset (if any), and sets up throughput throttling.
 *
 * @param props task configuration supplied by the Connect framework
 * @throws ConnectException if a numeric configuration value cannot be parsed
 */
@Override
public void start(Map<String, String> props) {
    final long configuredThroughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        configuredThroughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }
    // The source partition for this task is keyed by its numeric id.
    partition = Collections.singletonMap(ID_FIELD, id);
    // Resume one past the last committed sequence number, or start at zero
    // when no offset has ever been committed.
    Map<String, Object> storedOffset = this.context.offsetStorageReader().offset(partition);
    if (storedOffset == null) {
        seqno = 0;
    } else {
        seqno = (Long) storedOffset.get(SEQNO_FIELD) + 1;
    }
    startingSeqno = seqno;
    throttler = new ThroughputThrottler(configuredThroughput, System.currentTimeMillis());
    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
/**
 * Starts the task: parses its configuration, builds the throttler, and
 * restores the sequence number from the last committed offset (if any).
 *
 * @param props task configuration supplied by the Connect framework
 * @throws ConnectException if a numeric configuration value cannot be parsed
 */
@Override
public void start(Map<String, String> props) {
    final long targetThroughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        // Partition count is optional and defaults to a single partition.
        partitionCount = Integer.parseInt(props.containsKey(PARTITION_COUNT_CONFIG) ? props.get(PARTITION_COUNT_CONFIG) : "1");
        targetThroughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }
    throttler = new ThroughputThrottler(targetThroughput, System.currentTimeMillis());
    // The source partition for this task is keyed by its numeric id.
    partition = Collections.singletonMap(ID_FIELD, id);
    // Resume one past the last committed sequence number, or start at zero.
    Map<String, Object> storedOffset = this.context.offsetStorageReader().offset(partition);
    seqno = storedOffset == null ? 0 : (Long) storedOffset.get(SEQNO_FIELD) + 1;
    startingSeqno = seqno;
    count = 0;
    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
private void initializeLastVariables(){ Map<String, Object> lastSourceOffset = null; lastSourceOffset = context.offsetStorageReader().offset(sourcePartition()); if( lastSourceOffset == null){ // we haven't fetched anything yet, so we initialize to 7 days ago nextQuerySince = config.getSince(); lastIssueNumber = -1; } else { Object updatedAt = lastSourceOffset.get(UPDATED_AT_FIELD); Object issueNumber = lastSourceOffset.get(NUMBER_FIELD); Object nextPage = lastSourceOffset.get(NEXT_PAGE_FIELD); if(updatedAt != null && (updatedAt instanceof String)){ nextQuerySince = Instant.parse((String) updatedAt); } if(issueNumber != null && (issueNumber instanceof String)){ lastIssueNumber = Integer.valueOf((String) issueNumber); } if (nextPage != null && (nextPage instanceof String)){ nextPageToVisit = Integer.valueOf((String) nextPage); } } }
// Test helper: stubs the task context so that any offset lookup reports that
// no offset has been committed yet (i.e. the task starts fresh).
private void expectOffsetLookupReturnNone() {
    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    // anyObject(Map.class) matches whatever partition map the code under test passes.
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject(Map.class))).andReturn(null);
}
}
@Override public FileReader offer(FileMetadata metadata, OffsetStorageReader offsetStorageReader) throws IOException { Map<String, Object> partition = new HashMap<String, Object>() {{ put("path", metadata.getPath()); //TODO manage blocks //put("blocks", metadata.getBlocks().toString()); }}; FileSystem current = fileSystems.stream() .filter(fs -> metadata.getPath().startsWith(fs.getWorkingDirectory().toString())) .findFirst().orElse(null); FileReader reader; try { reader = ReflectionUtils.makeReader((Class<? extends FileReader>) conf.getClass(FsSourceTaskConfig.FILE_READER_CLASS), current, new Path(metadata.getPath()), conf.originals()); } catch (Throwable t) { throw new ConnectException("An error has occurred when creating reader for file: " + metadata.getPath(), t); } Map<String, Object> offset = offsetStorageReader.offset(partition); if (offset != null && offset.get("offset") != null) { reader.seek(() -> (Long) offset.get("offset")); } return reader; }
try {
    stream = Files.newInputStream(Paths.get(filename));
    // Resume from the last committed position for this file, if one exists;
    // the offset is keyed by file name.
    Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
    if (offset != null) {
        Object lastRecordedOffset = offset.get(POSITION_FIELD);
// Append the configured options and terminate the PL/SQL block that starts LogMiner.
logMinerStartScr=logMinerStartScr+logMinerOptions+") \n; end;";
logMinerStartStmt=dbConn.prepareCall(logMinerStartScr);
// Recover the last committed position for this database, keyed by database name.
Map<String,Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(LOG_MINER_OFFSET_FIELD, dbName));
// Default both SCNs to 0 (start from the beginning) until a stored offset,
// presumably applied below this fragment, says otherwise.
streamOffsetScn=0L;
streamOffsetCommitScn=0L;
try {
    // NOTE(review): Files.newInputStream(Paths.get(filename)) is the NIO
    // equivalent and yields more precise exceptions — consider switching.
    stream = new FileInputStream(filename);
    // Resume from the last committed position for this file, if one exists;
    // the offset is keyed by file name.
    Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
    if (offset != null) {
        Object lastRecordedOffset = offset.get(POSITION_FIELD);
// Null means "no stored offset yet": the caller presumably starts from the beginning.
Long lastOffset = null;
log.trace("looking up offset for {}", this.sourcePartition);
Map<String, Object> offset = this.context.offsetStorageReader().offset(this.sourcePartition);
if (null != offset && !offset.isEmpty()) {
    // Cast to Number rather than Long — presumably to tolerate the value being
    // deserialized as Integer or Long; confirm against the offset writer.
    Number number = (Number) offset.get("offset");
// Fetch any previously committed offset for this connector's partition; a null
// result means no prior run has committed anything (fresh start).
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
// Install this connector's MDC logging context; the returned handle presumably
// restores the previous context in a finally block outside this fragment — confirm.
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {