/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
@Override
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    Map<String, ?> partition = loader.getPartition();

    Map<String, Object> previousOffset = context.offsetStorageReader()
            .offsets(Collections.singleton(partition))
            .get(partition);

    if (previousOffset != null) {
        OffsetContext offsetContext = loader.load(previousOffset);
        LOGGER.info("Found previous offset {}", offsetContext);
        return offsetContext;
    }
    else {
        return null;
    }
}
context.offsetStorageReader().offsets(partitions).forEach(source::setOffsetFor);
Map<String, ?> lastOffset = context.offsetStorageReader().offset(partition);
long lastId = lastOffset == null ? 0L : (Long) lastOffset.get("id");
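The snippet above only covers the restart path; the offset it reads back must have been attached to records emitted earlier. A minimal sketch of that producing side, assuming a hypothetical fetchRowsAfter(lastId) query helper, a Row type, and a topic field (all illustrative, not from the original source):

// Sketch: each emitted SourceRecord carries the same partition map plus an
// "id" source offset; offsetStorageReader().offset(partition) returns that
// map after a restart, which is what the lookup above resumes from.
public List<SourceRecord> poll() throws InterruptedException {
    List<SourceRecord> records = new ArrayList<>();
    for (Row row : fetchRowsAfter(lastId)) { // hypothetical query helper
        Map<String, Long> sourceOffset = Collections.singletonMap("id", row.getId());
        records.add(new SourceRecord(partition, sourceOffset, topic,
                Schema.STRING_SCHEMA, row.toJson()));
        lastId = row.getId();
    }
    return records;
}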
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {
boolean snapshotEventsAreInserts = true;
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
if (offsets != null) {
/**
 * Loads the current saved offsets.
 */
private void loadOffsets() {
    List<Map<String, String>> partitions = new ArrayList<>();
    for (String db : databases) {
        Map<String, String> partition = Collections.singletonMap("mongodb", db);
        partitions.add(partition);
    }
    offsets.putAll(context.offsetStorageReader().offsets(partitions));
}
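The bulk offsets(Collection) call returns a map keyed by the same partition maps that were passed in, so callers of the loaded offsets field look up each database's resume position by rebuilding its partition map. A minimal sketch, assuming offsets is a Map<Map<String, String>, Map<String, Object>> and using an illustrative offset key name:

// Look up the resume position for one database; null means nothing was ever committed.
Map<String, String> partition = Collections.singletonMap("mongodb", "inventory");
Map<String, Object> offset = offsets.get(partition);
if (offset != null) {
    Object resumeToken = offset.get("resume_token"); // key name is illustrative
    // ... position the oplog/change-stream reader from resumeToken
}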
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }

    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null)
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    else
        seqno = 0;
    startingSeqno = seqno;

    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());

    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        partitionCount = Integer.parseInt(props.containsKey(PARTITION_COUNT_CONFIG)
                ? props.get(PARTITION_COUNT_CONFIG)
                : "1");
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }

    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;

    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
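Both verifiable tasks then combine the restored seqno with the throttler inside poll(). A sketch of that shape, assuming the upstream ThroughputThrottler API (shouldThrottle/throttle) and abbreviating record construction:

// Sketch: throttle against the number of records sent since start, then emit
// one record whose source offset records the current seqno for restart.
public List<SourceRecord> poll() throws InterruptedException {
    long sendStartMs = System.currentTimeMillis();
    if (throttler.shouldThrottle(seqno - startingSeqno, sendStartMs)) {
        throttler.throttle();
    }
    Map<String, Long> ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno);
    SourceRecord record = new SourceRecord(partition, ccOffset, topic,
            Schema.INT64_SCHEMA, seqno);
    seqno++;
    return Collections.singletonList(record);
}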
private void initializeLastVariables() {
    Map<String, Object> lastSourceOffset = context.offsetStorageReader().offset(sourcePartition());
    if (lastSourceOffset == null) {
        // Nothing has been fetched yet, so start from the configured default (7 days ago)
        nextQuerySince = config.getSince();
        lastIssueNumber = -1;
    } else {
        Object updatedAt = lastSourceOffset.get(UPDATED_AT_FIELD);
        Object issueNumber = lastSourceOffset.get(NUMBER_FIELD);
        Object nextPage = lastSourceOffset.get(NEXT_PAGE_FIELD);
        // instanceof is null-safe, so no separate null checks are needed
        if (updatedAt instanceof String) {
            nextQuerySince = Instant.parse((String) updatedAt);
        }
        if (issueNumber instanceof String) {
            lastIssueNumber = Integer.parseInt((String) issueNumber);
        }
        if (nextPage instanceof String) {
            nextPageToVisit = Integer.parseInt((String) nextPage);
        }
    }
}
@Override
public List<SourceRecord> poll() throws InterruptedException {
    while (stop != null && !stop.get() && !policy.hasEnded()) {
        log.trace("Polling for new data");

        final List<SourceRecord> results = new ArrayList<>();
        List<FileMetadata> files = filesToProcess();
        files.forEach(metadata -> {
            try (FileReader reader = policy.offer(metadata, context.offsetStorageReader())) {
                log.info("Processing records for file {}", metadata);
                while (reader.hasNext()) {
                    results.add(convert(metadata, reader.currentOffset(), reader.next()));
                }
            } catch (ConnectException | IOException e) {
                // When an exception happens while reading a file, the connector continues with the next one
                log.error("Error reading file from FS: " + metadata.getPath() + ". Keep going...", e);
            }
        });
        return results;
    }
    return null;
}
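The convert(...) call is where the reader's current offset actually ends up on the emitted record. A hypothetical sketch of it; the Offset type and the getRecordOffset()/getPath() accessors are assumptions for illustration, not taken from the original project:

// Hypothetical convert(): the file path serves as the source partition and the
// reader's position within that file as the source offset.
private SourceRecord convert(FileMetadata metadata, Offset offset, Struct value) {
    return new SourceRecord(
            Collections.singletonMap("path", metadata.getPath()),
            Collections.singletonMap("offset", offset.getRecordOffset()),
            topic,
            value.schema(),
            value);
}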
@Override
public void start(Map<String, String> map) {
    super.start(map);
    Service service = service(this, this.context.offsetStorageReader());
    List<Service> services = Arrays.asList(service);
    this.serviceManager = new ServiceManager(services);
    log.info("Starting Services");
    this.serviceManager.startAsync();
    try {
        this.serviceManager.awaitHealthy(60, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        throw new ConnectException("Timeout while starting service.", e);
    }
}
private void expectOffsetLookupReturnNone() {
    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject(Map.class))).andReturn(null);
}
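For tests that should resume from a stored offset instead, the mirror-image helper is straightforward; this counterpart is a sketch, not part of the original test class:

// Hypothetical counterpart: make the offset lookup return a stored offset map.
private void expectOffsetLookupReturnOffset(Map<String, Object> storedOffset) {
    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject(Map.class)))
            .andReturn(storedOffset);
}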
private void mockConsumerInitialization() throws Exception {
    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());

    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.endOffsets(topicPartitions)).andReturn(endOffsets);

    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();
}
@Test
public void testStartAllStoredPartitions() throws Exception {
    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<Map<String, String>, Map<String, Object>> storedOffsets = Collections.singletonMap(
            Collections.singletonMap(TOPIC_PARTITION_KEY, String.format("%s:%d", FIRST_TOPIC, FIRST_PARTITION)),
            Collections.singletonMap(OFFSET_KEY, FIRST_OFFSET));

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(storedOffsets);

    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);

    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();

    replayAll();

    objectUnderTest.start(opts);

    verifyAll();
}
@Test
public void testStartNoStoredPartitionsStartEnd() throws Exception {
    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());

    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.endOffsets(topicPartitions)).andReturn(endOffsets);

    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();

    replayAll();

    objectUnderTest.start(opts);

    verifyAll();
}
@Test
public void testStartNoStoredPartitionsStartBeginning() throws Exception {
    opts.put(KafkaSourceConnectorConfig.CONSUMER_AUTO_OFFSET_RESET_CONFIG, "earliest");
    config = new KafkaSourceConnectorConfig(opts);
    props = new Properties();
    props.putAll(config.allWithPrefix(KafkaSourceConnectorConfig.CONSUMER_PREFIX));

    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());

    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.beginningOffsets(topicPartitions)).andReturn(endOffsets);

    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();

    replayAll();

    objectUnderTest.start(opts);

    verifyAll();
}
        Collections.singletonMap(OFFSET_KEY, SECOND_OFFSET));

EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
        .andReturn(storedOffsets);