/**
 * Rewinds the given partitions to their earliest available offsets by
 * delegating directly to the wrapped consumer.
 *
 * @param partitions the topic-partitions to rewind to their beginning
 */
@Override
public void seekToBeginning(Collection<TopicPartition> partitions) {
    delegate.seekToBeginning(partitions);
}
/**
 * Returns the earliest offset currently available for the given Kafka partition.
 *
 * <p>The consumer is (re)assigned to exactly this one partition, rewound to its
 * beginning, and its resulting position is reported.
 *
 * @param partition the Gobblin partition descriptor to look up
 * @return the first available offset of the partition
 * @throws KafkaOffsetRetrievalFailureException if the offset cannot be retrieved
 */
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    final TopicPartition tp = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Collections.singletonList(tp));
    this.consumer.seekToBeginning(tp);
    return this.consumer.position(tp);
}
/**
 * Replays every command persisted in the command topic, starting from the
 * earliest offset, and returns them in the order they were read.
 *
 * <p>Polling stops at the first empty batch. Records with a {@code null}
 * value (tombstones) are skipped.
 *
 * @param duration the maximum time to block in each poll
 * @return all non-null commands read from the topic, in log order
 */
public List<QueuedCommand> getRestoreCommands(final Duration duration) {
    final List<QueuedCommand> restoreCommands = Lists.newArrayList();
    commandConsumer.seekToBeginning(Collections.singletonList(commandTopicPartition));
    log.debug("Reading prior command records");
    while (true) {
        final ConsumerRecords<CommandId, Command> records = commandConsumer.poll(duration);
        if (records.isEmpty()) {
            break;
        }
        log.debug("Received {} records from poll", records.count());
        for (final ConsumerRecord<CommandId, Command> record : records) {
            final Command value = record.value();
            if (value == null) {
                // Tombstone — nothing to restore for this command id.
                continue;
            }
            restoreCommands.add(new QueuedCommand(record.key(), value, Optional.empty()));
        }
    }
    return restoreCommands;
}
// Choose the starting position for this partition based on the configured
// first-poll offset strategy. The EARLIEST/LATEST strategies only apply on the
// first poll after the topology was (re)deployed.
if (firstPollOffsetStrategy == EARLIEST && isFirstPollSinceTopologyWasDeployed) {
    LOG.debug("First poll for topic partition [{}], seeking to partition beginning", tp);
    consumer.seekToBeginning(Collections.singleton(tp));
} else if (firstPollOffsetStrategy == LATEST && isFirstPollSinceTopologyWasDeployed) {
    // NOTE(review): this branch logs "seeking to partition end" but no
    // seekToEnd call is visible in this excerpt — confirm it was not lost.
    LOG.debug("First poll for topic partition [{}], seeking to partition end", tp);
} else if (firstPollOffsetStrategy == UNCOMMITTED_EARLIEST) {
    // No prior batch metadata: fall back to the beginning of the partition.
    LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition beginning", tp);
    consumer.seekToBeginning(Collections.singleton(tp));
} else if (firstPollOffsetStrategy == UNCOMMITTED_LATEST) {
    // No prior batch metadata: fall back to the end of the partition.
    LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition end", tp);
// Rewind this single partition to its earliest available offset.
consumer.seekToBeginning(Collections.singleton(topicPartition));
@Override public void start(WatermarkStorage watermarkStorage) throws IOException { Preconditions.checkArgument(watermarkStorage != null, "Watermark Storage should not be null"); Map<String, CheckpointableWatermark> watermarkMap = watermarkStorage.getCommittedWatermarks(KafkaWatermark.class, Collections.singletonList(_partition.toString())); KafkaWatermark watermark = (KafkaWatermark) watermarkMap.get(_partition.toString()); if (watermark == null) { LOG.info("Offset is null - seeking to beginning of topic and partition for {} ", _partition.toString()); _consumer.seekToBeginning(_partition); } else { // seek needs to go one past the last committed offset LOG.info("Offset found in consumer for partition {}. Seeking to one past what we found : {}", _partition.toString(), watermark.getLwm().getValue() + 1); _consumer.seek(_partition, watermark.getLwm().getValue() + 1); } _isStarted.set(true); }
@Test
public void shouldGetRestoreCommandsCorrectly() {
    // Given: three polls — two batches with records followed by an empty
    // batch, which is what terminates getRestoreCommands' read loop.
    when(commandConsumer.poll(any(Duration.class)))
        .thenReturn(someConsumerRecords(
            new ConsumerRecord<>("topic", 0, 0, commandId1, command1),
            new ConsumerRecord<>("topic", 0, 0, commandId2, command2)))
        .thenReturn(someConsumerRecords(
            new ConsumerRecord<>("topic", 0, 0, commandId3, command3)))
        .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));

    // When:
    final List<QueuedCommand> queuedCommandList = commandTopic
        .getRestoreCommands(Duration.ofMillis(1));

    // Then: the consumer was rewound to the start of the command topic...
    verify(commandConsumer).seekToBeginning(topicPartitionsCaptor.capture());
    assertThat(topicPartitionsCaptor.getValue(),
        equalTo(Collections.singletonList(new TopicPartition(COMMAND_TOPIC_NAME, 0))));
    // ...and every record from every poll was returned, in log order.
    assertThat(queuedCommandList, equalTo(ImmutableList.of(
        new QueuedCommand(commandId1, command1, Optional.empty()),
        new QueuedCommand(commandId2, command2, Optional.empty()),
        new QueuedCommand(commandId3, command3, Optional.empty()))));
}
// (continuation of a stream pipeline that begins above this excerpt)
.collect(Collectors.toList()));
// Rewind every partition present in the fetched records to its beginning.
consumer.seekToBeginning(records.partitions());
// --to-offset: reset all input partitions to the explicitly requested offset.
resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
    // --to-earliest: rewind all input partitions to their first offset.
    client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
    // --to-latest: move all input partitions to their end offset.
    client.seekToEnd(inputTopicPartitions);
    // NOTE(review): applying a reset plan inside the to-latest branch looks
    // out of place — presumably this belongs to a separate (e.g. from-file)
    // branch that was collapsed in this excerpt; confirm against upstream.
    resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
    // Default strategy: reset to earliest.
    client.seekToBeginning(inputTopicPartitions);
// --to-offset: reset all input partitions to the explicitly requested offset.
resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
    // --to-earliest: rewind all input partitions to their first offset.
    client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
    // --to-latest: move all input partitions to their end offset.
    client.seekToEnd(inputTopicPartitions);
    // NOTE(review): applying a reset plan inside the to-latest branch looks
    // out of place — presumably this belongs to a separate (e.g. from-file)
    // branch that was collapsed in this excerpt; confirm against upstream.
    resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
    // Default strategy: reset to earliest.
    client.seekToBeginning(inputTopicPartitions);
// --to-offset: reset all input partitions to the explicitly requested offset.
resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
    // --to-earliest: rewind all input partitions to their first offset.
    client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
    // --to-latest: move all input partitions to their end offset.
    client.seekToEnd(inputTopicPartitions);
    // NOTE(review): applying a reset plan inside the to-latest branch looks
    // out of place — presumably this belongs to a separate (e.g. from-file)
    // branch that was collapsed in this excerpt; confirm against upstream.
    resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
    // Default strategy: reset to earliest.
    client.seekToBeginning(inputTopicPartitions);
// Wait for the consumer to perform its initial seek before verifying.
assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
// forClass(List.class) is an unchecked cast to Collection<TopicPartition>;
// the raw class token is all Mockito needs to create the captor.
ArgumentCaptor<Collection<TopicPartition>> captor = ArgumentCaptor.forClass(List.class);
verify(consumer).seekToBeginning(captor.capture());
// The captured collection must contain exactly partitions 0 and 4 of "foo".
assertThat(captor.getValue())
    .isEqualTo(new HashSet<>(Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("foo", 4))));
@Override
public void onPartitionsAssigned(Collection<TopicPartition> collection) {
    // On every (re)assignment, rewind the newly assigned partitions to their
    // earliest offset — this consumer always replays from the beginning.
    consumer.seekToBeginning(collection);
}
});
@Override
public void onPartitionsAssigned(Collection<TopicPartition> collection) {
    // On every (re)assignment, rewind the newly assigned partitions to their
    // earliest offset — this consumer always replays from the beginning.
    consumer.seekToBeginning(collection);
}
});
// Collection.class cannot carry the generic parameter, so the capture is an
// unchecked cast to Collection<TopicPartition>.
@SuppressWarnings("unchecked")
ArgumentCaptor<Collection<TopicPartition>> captor = ArgumentCaptor.forClass(Collection.class);
verify(consumer).seekToBeginning(captor.capture());
// Only the topic name of the first captured partition is asserted here;
// the partition number is not pinned.
TopicPartition next = captor.getValue().iterator().next();
assertThat(next.topic()).isEqualTo(topic);
@Override
public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
    // Apply the configured reset only on the very first assignment;
    // getAndSet(false) makes this a one-shot across later rebalances.
    if (initialAssignment.getAndSet(false)) {
        if ("earliest".equals(resetTo)) {
            consumer.seekToBeginning(tps);
        } else if ("latest".equals(resetTo)) {
            consumer.seekToEnd(tps);
        }
        // Any other resetTo value leaves the consumer's position untouched.
    }
}
});
@Override
public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
    // Apply the configured reset only on the very first assignment;
    // getAndSet(false) makes this a one-shot across later rebalances.
    if (initialAssignment.getAndSet(false)) {
        if ("earliest".equals(resetTo)) {
            consumer.seekToBeginning(tps);
        } else if ("latest".equals(resetTo)) {
            consumer.seekToEnd(tps);
        }
        // Any other resetTo value leaves the consumer's position untouched.
    }
}
});
/**
 * Opens an iterator over the entire change log, beginning at its first entry.
 *
 * <p>The consumer is assigned to partition 0 of the log's topic and rewound to
 * the earliest offset before the iterator is handed back.
 *
 * @return an iterator positioned at the start of the change log
 * @throws QueryChangeLogException if the log cannot be read
 */
@Override
public CloseableIteration<ChangeLogEntry<QueryChange>, QueryChangeLogException> readFromStart() throws QueryChangeLogException {
    // The change log is written entirely to partition 0 of the topic.
    final TopicPartition logPartition = new TopicPartition(topic, 0);
    consumer.assign(Lists.newArrayList(logPartition));
    // Rewind so iteration starts at the very first entry in the log.
    consumer.seekToBeginning(Lists.newArrayList(logPartition));
    return new QueryChangeLogEntryIter(consumer);
}
@Override public void seekToBeginning(Collection<TopicPartition> partitions) { _kafkaConsumer.seekToBeginning(partitions); for (TopicPartition tp : partitions) { _consumerRecordsProcessor.clear(tp); // We set the high watermark to 0 if user is seeking to beginning. _consumerRecordsProcessor.setPartitionConsumerHighWaterMark(tp, 0L); } }
@Override
public void reinitializeStateStoresForPartitions(final Collection<TopicPartition> partitions,
                                                 final InternalProcessorContext processorContext) {
    // Wipe and re-register the global stores backing the given changelog partitions.
    super.reinitializeStateStoresForPartitions(
        log,
        globalStores,
        topology.storeToChangelogTopic(),
        partitions,
        processorContext);
    // The stores must be rebuilt from scratch, so re-assign the global
    // consumer and rewind it to the earliest offset of each partition.
    globalConsumer.assign(partitions);
    globalConsumer.seekToBeginning(partitions);
}