// Older consumer API: seekToBeginning accepted varargs TopicPartition... arguments.
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
    consumer.seekToBeginning(partition);
}

// Newer consumer API: seekToBeginning takes a Collection<TopicPartition>, so wrap the single partition.
@Override
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
    consumer.seekToBeginning(Collections.singletonList(partition));
}
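A minimal usage sketch of the Collection-based variant; the topic name, partition number, and properties below are placeholders, and the usual org.apache.kafka.clients.consumer imports are assumed.

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    TopicPartition partition = new TopicPartition("orders", 0);
    consumer.assign(Collections.singletonList(partition));
    // Equivalent to the helper above: wrap the single partition in a list.
    consumer.seekToBeginning(Collections.singletonList(partition));
    long earliest = consumer.position(partition);   // position() forces the lazy seek to resolve
}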
@Override
public void seekToEarliest(Set<StreamPartition<Integer>> partitions) {
    consumer.seekToBeginning(
        partitions.stream()
            .map(e -> new TopicPartition(e.getStream(), e.getPartitionId()))
            .collect(Collectors.toList())
    );
}
public static long getEarliestOffset(KafkaConsumer<?, ?> consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));
    // position() resolves the pending seek and returns the first available offset.
    return consumer.position(topicPartition);
}
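For contrast, a hedged companion sketch: the same pattern with seekToEnd yields the latest offset. getLatestOffset is hypothetical and not part of the snippet above.

public static long getLatestOffset(KafkaConsumer<?, ?> consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));
    // position() resolves the pending seek and returns the log-end offset.
    return consumer.position(topicPartition);
}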
final long nextOffset = outOfRangePartition.getValue();
consumer.seekToBeginning(Collections.singletonList(topicPartition));
final long leastAvailableOffset = consumer.position(topicPartition);
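A sketch of how such an out-of-range check might conclude; the reset decision below is an assumption, not taken from the original snippet.

if (leastAvailableOffset > nextOffset) {
    // The requested offset has been removed by retention; the records in between are gone.
    // Resume from the earliest offset that is still available.
    consumer.seek(topicPartition, leastAvailableOffset);
} else {
    // The requested offset still exists; restore the original position.
    consumer.seek(topicPartition, nextOffset);
}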
_consumer.seekToBeginning(partitionWithNoRecentMessage);
public TopicStreamWriter(
    final SchemaRegistryClient schemaRegistryClient,
    final Map<String, Object> consumerProperties,
    final String topicName,
    final long interval,
    final Duration disconnectCheckInterval,
    final boolean fromBeginning
) {
    this.schemaRegistryClient = schemaRegistryClient;
    this.topicName = topicName;
    this.messagesWritten = 0;
    this.disconnectCheckInterval = Objects
        .requireNonNull(disconnectCheckInterval, "disconnectCheckInterval");
    this.topicConsumer = new KafkaConsumer<>(
        consumerProperties,
        new StringDeserializer(),
        new BytesDeserializer()
    );

    final List<TopicPartition> topicPartitions = topicConsumer.partitionsFor(topicName)
        .stream()
        .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
        .collect(Collectors.toList());
    topicConsumer.assign(topicPartitions);
    if (fromBeginning) {
        topicConsumer.seekToBeginning(topicPartitions);
    }
    this.interval = interval;
}
@Override
public void subscribe(final Flow.Subscriber<Collection<String>> subscriber) {
    final KafkaConsumer<String, Bytes> topicConsumer = new KafkaConsumer<>(
        consumerProperties,
        new StringDeserializer(),
        new BytesDeserializer()
    );

    log.info("Running consumer for topic {}", topicName);

    final List<TopicPartition> topicPartitions = topicConsumer.partitionsFor(topicName)
        .stream()
        .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
        .collect(Collectors.toList());
    topicConsumer.assign(topicPartitions);
    if (fromBeginning) {
        topicConsumer.seekToBeginning(topicPartitions);
    }

    subscriber.onSubscribe(new PrintSubscription(
        subscriber,
        topicConsumer,
        new RecordFormatter(schemaRegistryClient, topicName)
    ));
}
    consumerTmp.seekToBeginning(Collections.singletonList(newPartitionState.getKafkaPartitionHandle()));
    newPartitionState.setOffset(consumerTmp.position(newPartitionState.getKafkaPartitionHandle()) - 1);
} else if (newPartitionState.getOffset() == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
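    // Sketch, not from the original snippet: the LATEST_OFFSET branch presumably
    // mirrors the earliest case, using seekToEnd before reading the position back.
    consumerTmp.seekToEnd(Collections.singletonList(newPartitionState.getKafkaPartitionHandle()));
    newPartitionState.setOffset(consumerTmp.position(newPartitionState.getKafkaPartitionHandle()) - 1);
}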
private void checkData() {
    Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
    consumer.assign(assignment);
    consumer.seekToBeginning(assignment);
    long numRecords = 0;
    boolean emptyPoll = false;
    while (numRecords < RECORD_NUMBER && !emptyPoll) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
        Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
            .stream()
            .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(
                0,
                consumerRecord.timestamp(),
                consumerRecord.value(),
                consumerRecord.key()))));
        emptyPoll = records.isEmpty();
        numRecords += records.count();
    }
    Assert.assertEquals(RECORD_NUMBER, numRecords);
}
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));

    // There shouldn't be any need to look up the coordinator or fetch committed offsets.
    // We just look up the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
    consumer.close(Duration.ofMillis(0));
}
consumer.assign(Arrays.asList(tp0, tp1));
consumer.seekToEnd(singleton(tp0));
consumer.seekToBeginning(singleton(tp1));
Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(topic, 0));
consumer.assign(assignment);
consumer.seekToBeginning(assignment);
long numRecords = 0;
while (numRecords < RECORD_NUMBER) {
kafkaConsumer.seekToBeginning(assignments);
for (TopicPartition topicPartition : assignments) {
    startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
consumer.seekToBeginning(assignment);
long numRecords = 0;
final List<ConsumerRecord<byte[], byte[]>> actualRecords = new ArrayList<>();
consumer.seekToBeginning(singleton(tp0));
if (flag == -2) {
    // -2 conventionally denotes the earliest offset (Kafka's OffsetRequest.EarliestTime).
    kafkaConsumer.seekToBeginning(assignments);
    for (TopicPartition topicPartition : assignments) {
        offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
client.seekToBeginning(inputAndInternalTopicPartitions);
@Override
public void toStart() {
    log.debug("toStart: " + id);
    lastOffsets.clear();
    records.clear();
    // An empty collection tells the consumer to seek to the beginning of
    // every currently assigned partition.
    consumer.seekToBeginning(Collections.emptyList());
    consumerMoved = true;
}
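To illustrate the empty-list call above, a minimal sketch (topic name and partitions are placeholders): per the consumer contract, passing no partitions seeks every currently assigned partition to its first offset.

consumer.assign(Arrays.asList(new TopicPartition("events", 0), new TopicPartition("events", 1)));
consumer.seekToBeginning(Collections.emptyList());   // empty collection => all assigned partitions
for (TopicPartition tp : consumer.assignment()) {
    System.out.printf("%s rewound to offset %d%n", tp, consumer.position(tp));
}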