public long getCommandTopicConsumerPosition() {
    return commandConsumer.position(commandTopicPartition);
}
@Override
public long position(TopicPartition partition) {
    return delegate.position(partition);
}

@Override
public long position(TopicPartition partition, Duration timeout) {
    return delegate.position(partition, timeout);
}
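For context on the calls being delegated: position(TopicPartition) returns the offset of the next record the consumer will fetch and is only valid for currently assigned partitions, while the Duration overload bounds the offset lookup that may be triggered. A minimal, self-contained sketch; the broker address and topic name are illustrative, not from the source:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PositionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative address
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0); // illustrative topic
            consumer.assign(Collections.singletonList(tp));
            consumer.seekToBeginning(Collections.singletonList(tp));
            // Without a prior poll or seek the consumer has no position yet;
            // position() resolves one (here: the beginning offset) before returning.
            long next = consumer.position(tp, Duration.ofSeconds(5));
            System.out.println("Next offset to fetch: " + next);
        }
    }
}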
private Map<TopicPartition, OffsetAndMetadata> createFetchedOffsetsMetadata(Set<TopicPartition> assignedPartitions) {
    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
    for (TopicPartition tp : assignedPartitions) {
        offsetsToCommit.put(tp, new OffsetAndMetadata(consumer.position(tp), commitMetadataManager.getCommitMetadata()));
    }
    return offsetsToCommit;
}
/**
 * Check whether consumption is done. Consumption is done when the consumer has caught up with the
 * log end offset on every partition, or all the partitions are paused.
 *
 * @param endOffsets the log end offset for each partition.
 * @return true if consumption is done, false otherwise.
 */
private boolean consumptionDone(Map<TopicPartition, Long> endOffsets) {
    Set<TopicPartition> partitionsNotPaused = new HashSet<>(_metricConsumer.assignment());
    partitionsNotPaused.removeAll(_metricConsumer.paused());
    for (TopicPartition tp : partitionsNotPaused) {
        if (_metricConsumer.position(tp) < endOffsets.get(tp)) {
            return false;
        }
    }
    return true;
}
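A hedged sketch of the poll loop such a predicate typically drives; only _metricConsumer and consumptionDone come from the snippet above, the record types and the process handler are assumptions:

// Snapshot the log-end offsets once, then poll until every unpaused
// partition has caught up (or been paused).
Map<TopicPartition, Long> endOffsets = _metricConsumer.endOffsets(_metricConsumer.assignment());
while (!consumptionDone(endOffsets)) {
    for (ConsumerRecord<byte[], byte[]> record : _metricConsumer.poll(Duration.ofSeconds(1))) {
        process(record); // hypothetical handler
    }
}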
/**
 * Poll more records from the Kafka broker.
 *
 * @throws PollTimeoutException if poll returns no records and the consumer's position < requested endOffset.
 */
private void pollRecords() {
    if (LOG.isTraceEnabled()) {
        stopwatch.reset().start();
    }
    records = consumer.poll(pollTimeoutDurationMs);
    if (LOG.isTraceEnabled()) {
        stopwatch.stop();
        LOG.trace("Pulled [{}] records in [{}] ms", records.count(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
    // Fail if we cannot poll anything within one lap of pollTimeoutMs.
    if (records.isEmpty() && consumer.position(topicPartition) < endOffset) {
        throw new PollTimeoutException(String.format(ERROR_POLL_TIMEOUT_FORMAT, pollTimeoutMs,
            topicPartition.toString(), startOffset, consumer.position(topicPartition), endOffset));
    }
    consumerRecordIterator = records.iterator();
    consumerPosition = consumer.position(topicPartition);
}
this.endOffset = consumer.position(topicPartition);
LOG.info("End Offset set to [{}]", this.endOffset);
} else {
    LOG.info("Seeking to offset [{}] of topic partition [{}]", requestedStartOffset, topicPartition);
    consumer.seek(topicPartition, requestedStartOffset);
    this.startOffset = consumer.position(topicPartition);
    if (this.startOffset != requestedStartOffset) {
        LOG.warn("Current start offset [{}] is different from the requested start position [{}]", ...

this.startOffset = consumer.position(topicPartition);
LOG.info("Consumer at beginning of topic partition [{}], current start offset [{}]", topicPartition, ...

consumerPosition = consumer.position(topicPartition);
Preconditions.checkState(this.endOffset >= consumerPosition,
    "End offset [%s] needs to be greater than or equal to start offset [%s]", ...
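The truncated fragments above come from a bounded-range reader. Condensed into one self-contained helper, the start/end bracketing looks roughly like this; the class shell and method name are assumptions, not the source's code:

import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

// Hedged sketch: bracket a scan of one partition between a requested
// start offset and the log end, mirroring the fragments above.
final class BoundedRangeSketch {
    static long[] bracket(Consumer<byte[], byte[]> consumer, TopicPartition tp, long requestedStartOffset) {
        consumer.assign(Collections.singletonList(tp));
        consumer.seekToEnd(Collections.singletonList(tp));
        long endOffset = consumer.position(tp);   // log end at scan time
        consumer.seek(tp, requestedStartOffset);
        long startOffset = consumer.position(tp); // the offset the consumer will actually use
        if (endOffset < startOffset) {
            throw new IllegalStateException(
                "End offset " + endOffset + " must be >= start offset " + startOffset);
        }
        return new long[] {startOffset, endOffset};
    }
}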
@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Collections.singletonList(topicPartition));
    this.consumer.seekToEnd(topicPartition);
    return this.consumer.position(topicPartition);
}
long position = consumer.position(tp);
long committedOffset = tpOffset.getValue().offset();
if (position < committedOffset) {
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Collections.singletonList(topicPartition));
    this.consumer.seekToBeginning(topicPartition);
    return this.consumer.position(topicPartition);
}
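Both lookups above use the old varargs seekToBeginning/seekToEnd signatures from early consumer releases. Against the current Collection-based API, the same pattern reads as follows; the class shell is an assumption:

import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

// Hedged sketch of the same earliest/latest lookups against the
// Collection-based seek API used by current Kafka clients.
final class OffsetLookupSketch {
    static long earliestOffset(Consumer<?, ?> consumer, TopicPartition tp) {
        consumer.assign(Collections.singletonList(tp));
        consumer.seekToBeginning(Collections.singletonList(tp));
        return consumer.position(tp);
    }

    static long latestOffset(Consumer<?, ?> consumer, TopicPartition tp) {
        consumer.assign(Collections.singletonList(tp));
        consumer.seekToEnd(Collections.singletonList(tp));
        return consumer.position(tp);
    }
}

Where the consumer's position doesn't need to move, beginningOffsets and endOffsets return the same numbers without seeking.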
consumer.seekToEnd(Collections.singleton(tp));
tpToFirstSeekOffset.put(tp, consumer.position(tp));
} else if (lastBatchMeta != null) {
    // Seek to the next offset after the last offset from the previous batch.
    consumer.seek(tp, lastBatchMeta.getLastOffset() + 1);
}
final long fetchOffset = consumer.position(tp);
LOG.debug("Set [fetchOffset = {}] for partition [{}]", fetchOffset, tp);
return fetchOffset;
long lastEmittedOffset = consumer.position(currBatchTp) - 1;
currentBatch = new KafkaTridentSpoutBatchMetadata(lastEmittedOffset, lastEmittedOffset, topologyContext.getStormId());
public void shiftOffsetsBy(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, long shiftBy) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);

    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        final long position = client.position(topicPartition);
        final long offset = position + shiftBy;
        topicPartitionsAndOffset.put(topicPartition, offset);
    }

    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);

    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}
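A hedged usage sketch for the shift helper above; the consumer instance and shift amount are illustrative:

// Illustrative call site: rewind every assigned partition by 100 records.
// checkOffsetRange(...) clamps each result into the valid [beginning, end] range.
final Set<TopicPartition> partitions = consumer.assignment();
shiftOffsetsBy(consumer, partitions, -100L);
for (final TopicPartition tp : partitions) {
    System.out.println(tp + " will next fetch offset " + consumer.position(tp));
}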
@Override
public String getSampleMessage(final String topic) {
    String message = null;
    if (listTopics().contains(topic)) {
        try (Consumer<String, String> kafkaConsumer = kafkaConsumerFactory.createConsumer()) {
            kafkaConsumer.assign(kafkaConsumer.partitionsFor(topic).stream()
                .map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
                .collect(Collectors.toList()));

            // Step each partition back by one so the next poll re-reads the latest record.
            kafkaConsumer.assignment().stream()
                .filter(p -> (kafkaConsumer.position(p) - 1) >= 0)
                .forEach(p -> kafkaConsumer.seek(p, kafkaConsumer.position(p) - 1));

            final ConsumerRecords<String, String> records = kafkaConsumer.poll(KAFKA_CONSUMER_TIMEOUT);
            message = records.isEmpty() ? null : records.iterator().next().value();
            kafkaConsumer.unsubscribe();
        }
    }
    return message;
}
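Note that stepping back by one only lands on the most recently appended record if position(p) resolves to the log-end offset on the freshly assigned partitions, which is the consumer's default auto.offset.reset=latest behavior; this assumes the factory does not override that setting.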
System.out.println("Topic: " + p.topic() + " Partition: " + p.partition() + " Offset: " + client.position(p));
System.out.println("Topic: " + p.topic() + " Partition: " + p.partition() + " Offset: " + client.position(p));
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
container.stop();
consumer.close();
System.out.println("Topic: " + p.topic() + " Partition: " + p.partition() + " Offset: " + client.position(p));