/**
 * Returns the start-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous start-fetch epoch time, or 0 if none was recorded
 */
private long getPreviousStartFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousTime = this.previousStartFetchEpochTimes.get(partition);
  return previousTime == null ? 0 : previousTime;
}
/**
 * Returns the stop-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous stop-fetch epoch time, or 0 if none was recorded
 */
private long getPreviousStopFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousTime = this.previousStopFetchEpochTimes.get(partition);
  return previousTime == null ? 0 : previousTime;
}
/**
 * Returns the low watermark recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous low watermark for the partition
 * @throws PreviousOffsetNotFoundException if no previous low watermark exists for the partition
 */
private long getPreviousLowWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousLowWatermark = this.previousLowWatermarks.get(partition);
  if (previousLowWatermark != null) {
    return previousLowWatermark;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous low watermark for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the expected high watermark recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous expected high watermark for the partition
 * @throws PreviousOffsetNotFoundException if no previous expected high watermark exists for the partition
 */
private long getPreviousExpectedHighWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousHighWatermark = this.previousExpectedHighWatermarks.get(partition);
  if (previousHighWatermark != null) {
    return previousHighWatermark;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous expected high watermark for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the offset-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous offset-fetch epoch time for the partition
 * @throws PreviousOffsetNotFoundException if no previous offset-fetch epoch time exists for the partition
 */
private long getPreviousOffsetFetchEpochTimeForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousFetchTime = this.previousOffsetFetchEpochTimes.get(partition);
  if (previousFetchTime != null) {
    return previousFetchTime;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous offset fetch epoch time for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the offset recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous offset for the partition
 * @throws PreviousOffsetNotFoundException if no previous offset exists for the partition
 */
private long getPreviousOffsetForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousOffset = this.previousOffsets.get(partition);
  if (previousOffset != null) {
    return previousOffset;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous offset for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
private void createEmptyWorkUnitsForSkippedPartitions(Map<String, List<WorkUnit>> workUnits, Map<String, State> topicSpecificStateMap, SourceState state) { // in case the previous offset not been set getAllPreviousOffsetState(state); // For each partition that has a previous offset, create an empty WorkUnit for it if // it is not in this.partitionsToBeProcessed. for (Map.Entry<KafkaPartition, Long> entry : this.previousOffsets.entrySet()) { KafkaPartition partition = entry.getKey(); if (!this.partitionsToBeProcessed.contains(partition)) { String topicName = partition.getTopicName(); if (!this.isDatasetStateEnabled.get() || this.topicsToProcess.contains(topicName)) { long previousOffset = entry.getValue(); WorkUnit emptyWorkUnit = createEmptyWorkUnit(partition, previousOffset, this.previousOffsetFetchEpochTimes.get(partition), Optional.fromNullable(topicSpecificStateMap.get(partition.getTopicName()))); if (workUnits.containsKey(topicName)) { workUnits.get(topicName).add(emptyWorkUnit); } else { workUnits.put(topicName, Lists.newArrayList(emptyWorkUnit)); } } } } }
/**
 * Returns the stop-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous stop-fetch epoch time, or 0 if none was recorded
 */
private long getPreviousStopFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousTime = this.previousStopFetchEpochTimes.get(partition);
  return previousTime == null ? 0 : previousTime;
}
/**
 * Returns the start-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous start-fetch epoch time, or 0 if none was recorded
 */
private long getPreviousStartFetchEpochTimeForPartition(KafkaPartition partition, SourceState state) {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousTime = this.previousStartFetchEpochTimes.get(partition);
  return previousTime == null ? 0 : previousTime;
}
/**
 * Returns the low watermark recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous low watermark for the partition
 * @throws PreviousOffsetNotFoundException if no previous low watermark exists for the partition
 */
private long getPreviousLowWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousLowWatermark = this.previousLowWatermarks.get(partition);
  if (previousLowWatermark != null) {
    return previousLowWatermark;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous low watermark for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the expected high watermark recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous expected high watermark for the partition
 * @throws PreviousOffsetNotFoundException if no previous expected high watermark exists for the partition
 */
private long getPreviousExpectedHighWatermark(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousHighWatermark = this.previousExpectedHighWatermarks.get(partition);
  if (previousHighWatermark != null) {
    return previousHighWatermark;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous expected high watermark for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the offset-fetch epoch time recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous offset-fetch epoch time for the partition
 * @throws PreviousOffsetNotFoundException if no previous offset-fetch epoch time exists for the partition
 */
private long getPreviousOffsetFetchEpochTimeForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousFetchTime = this.previousOffsetFetchEpochTimes.get(partition);
  if (previousFetchTime != null) {
    return previousFetchTime;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous offset fetch epoch time for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
/**
 * Returns the offset recorded for {@code partition} in the previous run.
 *
 * @param partition the Kafka partition to look up
 * @param state the source state holding previous-run offset information
 * @return the previous offset for the partition
 * @throws PreviousOffsetNotFoundException if no previous offset exists for the partition
 */
private long getPreviousOffsetForPartition(KafkaPartition partition, SourceState state)
    throws PreviousOffsetNotFoundException {
  // Make sure the previous offset state has been loaded before reading the cached map.
  getAllPreviousOffsetState(state);
  // Single map lookup instead of containsKey + get; null means no previous value.
  Long previousOffset = this.previousOffsets.get(partition);
  if (previousOffset != null) {
    return previousOffset;
  }
  throw new PreviousOffsetNotFoundException(String
      .format("Previous offset for topic %s, partition %s not found.", partition.getTopicName(),
          partition.getId()));
}
private void createEmptyWorkUnitsForSkippedPartitions(Map<String, List<WorkUnit>> workUnits, Map<String, State> topicSpecificStateMap, SourceState state) { // in case the previous offset not been set getAllPreviousOffsetState(state); // For each partition that has a previous offset, create an empty WorkUnit for it if // it is not in this.partitionsToBeProcessed. for (Map.Entry<KafkaPartition, Long> entry : this.previousOffsets.entrySet()) { KafkaPartition partition = entry.getKey(); if (!this.partitionsToBeProcessed.contains(partition)) { String topicName = partition.getTopicName(); if (!this.isDatasetStateEnabled.get() || this.topicsToProcess.contains(topicName)) { long previousOffset = entry.getValue(); WorkUnit emptyWorkUnit = createEmptyWorkUnit(partition, previousOffset, this.previousOffsetFetchEpochTimes.get(partition), Optional.fromNullable(topicSpecificStateMap.get(partition.getTopicName()))); if (workUnits.containsKey(topicName)) { workUnits.get(topicName).add(emptyWorkUnit); } else { workUnits.put(topicName, Lists.newArrayList(emptyWorkUnit)); } } } } }