@Override
public void seek(TopicPartition partition, long offset) {
    delegate.seek(partition, offset);  // pass-through to the wrapped consumer
}
} else if (lastBatchMeta != null) {
    LOG.debug("First poll for topic partition [{}], using last batch metadata", tp);
    consumer.seek(tp, lastBatchMeta.getLastOffset() + 1);  // seek to the offset after the last offset of the previous batch
} else if (firstPollOffsetStrategy == UNCOMMITTED_EARLIEST) {
    LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition beginning", tp);
    // lastBatchMeta is null on this branch, so seeking relative to it would NPE;
    // seek to the beginning of the partition, as the log message says
    consumer.seekToBeginning(Collections.singleton(tp));
} else {
    consumer.seek(tp, initialFetchOffset);
    LOG.debug("First poll for topic partition [{}], no last batch metadata present."
        + " Using stored initial fetch offset [{}]", tp, initialFetchOffset);
}
@Override
public void start(WatermarkStorage watermarkStorage) throws IOException {
    Preconditions.checkArgument(watermarkStorage != null, "Watermark Storage should not be null");
    Map<String, CheckpointableWatermark> watermarkMap =
        watermarkStorage.getCommittedWatermarks(KafkaWatermark.class, Collections.singletonList(_partition.toString()));
    KafkaWatermark watermark = (KafkaWatermark) watermarkMap.get(_partition.toString());
    if (watermark == null) {
        LOG.info("Offset is null - seeking to beginning of topic and partition for {}", _partition.toString());
        _consumer.seekToBeginning(_partition);
    } else {
        // seek needs to go one past the last committed offset
        LOG.info("Offset found in consumer for partition {}. Seeking to one past what we found : {}",
            _partition.toString(), watermark.getLwm().getValue() + 1);
        _consumer.seek(_partition, watermark.getLwm().getValue() + 1);
    }
    _isStarted.set(true);
}
private void doSeekRetriableTopicPartitions(Map<TopicPartition, Long> pollableEarliestRetriableOffsets) {
    for (Entry<TopicPartition, Long> retriableTopicPartitionAndOffset : pollableEarliestRetriableOffsets.entrySet()) {
        // Seek directly to the earliest retriable message for each retriable topic partition
        consumer.seek(retriableTopicPartitionAndOffset.getKey(), retriableTopicPartitionAndOffset.getValue());
    }
}
consumer.seek(topicPartition, requestedStartOffset);
this.startOffset = consumer.position(topicPartition);
if (this.startOffset != requestedStartOffset) {
    // position() can differ from the requested offset, e.g. when the requested
    // offset is out of range on the broker; handling body elided in the source
private void rollback(final TopicPartition topicPartition) {
    OffsetAndMetadata offsetAndMetadata = uncommittedOffsetsMap.get(topicPartition);
    if (offsetAndMetadata == null) {
        offsetAndMetadata = kafkaConsumer.committed(topicPartition);
    }
    // committed() returns null when the group has no committed offset for this
    // partition, so guard against an NPE and fall back to offset 0
    final long offset = offsetAndMetadata == null ? 0L : offsetAndMetadata.offset();
    kafkaConsumer.seek(topicPartition, offset);
}
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata offsetAndMetadata = uncommittedOffsetsMap.get(topicPartition);
        if (offsetAndMetadata == null) {
            offsetAndMetadata = kafkaConsumer.committed(topicPartition);
        }
        final long offset = offsetAndMetadata == null ? 0L : offsetAndMetadata.offset();
        kafkaConsumer.seek(topicPartition, offset);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
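// Hypothetical driver loop (illustrative, not from the snippets above) sketching
// where a rollback like the ones above fits: processRecord() is a stand-in, and
// poll(long) matches the older consumer API used in these snippets. The first
// offset of each not-yet-committed batch is remembered so rollback() can re-seek
// there and the records are redelivered on the next poll.
final ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
for (final TopicPartition tp : records.partitions()) {
    final List<ConsumerRecord<String, String>> batch = records.records(tp);
    uncommittedOffsetsMap.putIfAbsent(tp, new OffsetAndMetadata(batch.get(0).offset()));
    try {
        for (final ConsumerRecord<String, String> record : batch) {
            processRecord(record);  // stand-in for real handling
        }
        kafkaConsumer.commitSync(Collections.singletonMap(tp,
            new OffsetAndMetadata(batch.get(batch.size() - 1).offset() + 1)));
        uncommittedOffsetsMap.remove(tp);
    } catch (final Exception e) {
        rollback(tp);  // re-seek so the failed batch is polled again
    }
}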
consumer.seek(tp, committedOffset);
log.info("{} fetching offset {} ", topic + ":" + split.getBrokers() + ":" + partition, watermark); TopicPartition topicPartition = new TopicPartition(topic, partition); consumer.seek(topicPartition, watermark); messages = consumer.poll(timeOut); iterator = messages.iterator();
@Override
public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
    if (nextOffset > maxOffset) {
        return null;
    }
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Lists.newArrayList(topicPartition));
    this.consumer.seek(topicPartition, nextOffset);
    ConsumerRecords<K, V> consumerRecords = consumer.poll(super.fetchTimeoutMillis);
    return Iterators.transform(consumerRecords.iterator(),
        new Function<ConsumerRecord<K, V>, KafkaConsumerRecord>() {
            @Override
            public KafkaConsumerRecord apply(ConsumerRecord<K, V> input) {
                return new Kafka09ConsumerRecord<>(input);
            }
        });
}
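// Hypothetical caller of consume() above: drain a partition from nextOffset up
// to maxOffset. consume() returns null once nextOffset passes maxOffset, and a
// single poll() may return fewer records than the remaining range, hence the
// loop. process() and getNextOffset() are assumed names, not part of the snippet.
long nextOffset = startOffset;
while (nextOffset <= maxOffset) {
    final Iterator<KafkaConsumerRecord> it = consumer.consume(partition, nextOffset, maxOffset);
    if (it == null || !it.hasNext()) {
        break;  // past maxOffset, or nothing fetched within the timeout
    }
    while (it.hasNext()) {
        final KafkaConsumerRecord record = it.next();
        process(record);                      // stand-in for real handling
        nextOffset = record.getNextOffset();  // assumed accessor for offset + 1
    }
}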
consumer.seek(currBatchTp, seekOffset);
private void resetToDatetime(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Long timestamp) {
    final Map<TopicPartition, Long> topicPartitionsAndTimes = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        topicPartitionsAndTimes.put(topicPartition, timestamp);
    }
    final Map<TopicPartition, OffsetAndTimestamp> topicPartitionsAndOffset = client.offsetsForTimes(topicPartitionsAndTimes);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        final OffsetAndTimestamp offsetAndTimestamp = topicPartitionsAndOffset.get(topicPartition);
        if (offsetAndTimestamp != null) {
            client.seek(topicPartition, offsetAndTimestamp.offset());
        } else {
            // offsetsForTimes() maps a partition to null when it has no record at
            // or after the timestamp; fall back to the end of the partition
            client.seekToEnd(Collections.singleton(topicPartition));
        }
    }
}
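// Usage note for resetToDatetime() above: offsetsForTimes() expects epoch
// milliseconds, so a wall-clock datetime has to be converted first. The input
// string below is just an example.
final Instant resetPoint = Instant.parse("2019-01-01T00:00:00Z");
resetToDatetime(client, inputTopicPartitions, resetPoint.toEpochMilli());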
public void resetOffsetsTo(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Long offset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}
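// checkOffsetRange() is referenced above but not shown; a plausible minimal
// reconstruction, assuming its contract is to clamp each requested offset into
// the partition's valid [beginning, end] range before seeking.
private Map<TopicPartition, Long> checkOffsetRange(final Map<TopicPartition, Long> inputOffsets,
                                                   final Map<TopicPartition, Long> beginningOffsets,
                                                   final Map<TopicPartition, Long> endOffsets) {
    final Map<TopicPartition, Long> validated = new HashMap<>(inputOffsets.size());
    for (final Map.Entry<TopicPartition, Long> entry : inputOffsets.entrySet()) {
        final TopicPartition topicPartition = entry.getKey();
        // clamp into [beginningOffset, endOffset] so seek() never lands out of bounds
        final long clamped = Math.max(beginningOffsets.get(topicPartition),
            Math.min(endOffsets.get(topicPartition), entry.getValue()));
        validated.put(topicPartition, clamped);
    }
    return validated;
}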
public void resetOffsetsFromResetPlan(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions,
                                      Map<TopicPartition, Long> topicPartitionsAndOffset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}
@Bean
public ConsumerAwareListenerErrorHandler listen3ErrorHandler() {
    return (m, e, c) -> {
        this.listen3Exception = e;
        MessageHeaders headers = m.getHeaders();
        // re-seek the failed record so it is redelivered on the next poll
        c.seek(new org.apache.kafka.common.TopicPartition(
                headers.get(KafkaHeaders.RECEIVED_TOPIC, String.class),
                headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, Integer.class)),
            headers.get(KafkaHeaders.OFFSET, Long.class));
        return null;
    };
}
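// Hypothetical listener wired to the error handler above via its bean name;
// the id and topic are placeholders. After the handler re-seeks, the failed
// record is redelivered on the next poll.
@KafkaListener(id = "listen3", topics = "someTopic", errorHandler = "listen3ErrorHandler")
public void listen3(String data) {
    throw new RuntimeException("fail; the error handler seeks back so this record is retried");
}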