private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) { // the fetched list is immutable, so we're creating a mutable copy in order to sort it List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic)); // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks Collections.sort(partitionsList, new Comparator<PartitionInfo>() { @Override public int compare(PartitionInfo o1, PartitionInfo o2) { return Integer.compare(o1.partition(), o2.partition()); } }); int[] partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } return partitions; }
protected static int[] getPartitionsByTopic(String topic, KafkaProducer<byte[], byte[]> producer) { // the fetched list is immutable, so we're creating a mutable copy in order to sort it List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic)); // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks Collections.sort(partitionsList, new Comparator<PartitionInfo>() { @Override public int compare(PartitionInfo o1, PartitionInfo o2) { return Integer.compare(o1.partition(), o2.partition()); } }); int[] partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } return partitions; }
private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) { // the fetched list is immutable, so we're creating a mutable copy in order to sort it List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic)); // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks Collections.sort(partitionsList, new Comparator<PartitionInfo>() { @Override public int compare(PartitionInfo o1, PartitionInfo o2) { return Integer.compare(o1.partition(), o2.partition()); } }); int[] partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } return partitions; }
private static List<TopicPartition> fetchTopicPartitions(String topic, KafkaConsumer<byte[], byte[]> consumer) { // this will block till REQUEST_TIMEOUT_MS_CONFIG = "request.timeout.ms" // then throws org.apache.kafka.common.errors.TimeoutException if can not fetch metadata // @TODO add retry logic maybe List<PartitionInfo> partitions = consumer.partitionsFor(topic); return partitions.stream().map(p -> new TopicPartition(topic, p.partition())).collect(Collectors.toList()); }
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    // Rebuild the tracked topic set from scratch on every discovery pass.
    topics.clear();
    final Set<TopicPartition> subscribedPartitions = new HashSet<>();
    // Scan every topic known to the cluster and keep the ones matching our pattern.
    consumer.listTopics().forEach((topicName, infos) -> {
        if (!pattern.matcher(topicName).matches()) {
            return;
        }
        for (final PartitionInfo info : infos) {
            subscribedPartitions.add(new TopicPartition(info.topic(), info.partition()));
            topics.add(info.topic());
        }
    });
    return subscribedPartitions;
}
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    // Collect every partition of each explicitly subscribed topic.
    final Set<TopicPartition> result = new HashSet<>();
    for (final String topic : topics) {
        final List<PartitionInfo> infos = consumer.partitionsFor(topic);
        if (infos == null) {
            // partitionsFor returns null for unknown topics; skip rather than fail
            LOG.warn("Topic {} not found, skipping addition of the topic", topic);
            continue;
        }
        for (final PartitionInfo info : infos) {
            result.add(new TopicPartition(info.topic(), info.partition()));
        }
    }
    return result;
}
/**
 * Reads the committed offset (if any) for every partition of {@code topicStr}.
 *
 * @param client consumer used to fetch partition metadata and committed offsets
 * @return map of partition to committed offset; partitions without a commit are omitted
 */
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
    KafkaConsumer<String, byte[]> client) {
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    for (PartitionInfo info : client.partitionsFor(topicStr)) {
        TopicPartition tp = new TopicPartition(topicStr, info.partition());
        OffsetAndMetadata committed = client.committed(tp);
        // committed(...) returns null when nothing has been committed for this partition
        if (committed != null) {
            committedOffsets.put(tp, committed);
        }
    }
    return committedOffsets;
}
/**
 * Subscribes this mock consumer to all known topics matching {@code pattern}
 * and immediately assigns every partition of the matched topics.
 *
 * <p>NOTE(review): statement order matters here — the pattern subscription is
 * registered first, then {@code subscriptions.subscription()} is consulted to
 * avoid re-adding already-subscribed topics. Do not reorder.
 */
@Override
public synchronized void subscribe(Pattern pattern, final ConsumerRebalanceListener listener) {
    ensureNotClosed();
    // Drop any committed-offset state tracked for the previous subscription.
    committed.clear();
    this.subscriptions.subscribe(pattern, listener);
    Set<String> topicsToSubscribe = new HashSet<>();
    // Match the pattern against every topic this mock knows about, skipping
    // topics that are already part of the current subscription.
    for (String topic: partitions.keySet()) {
        if (pattern.matcher(topic).matches() && !subscriptions.subscription().contains(topic))
            topicsToSubscribe.add(topic);
    }
    ensureNotClosed();
    this.subscriptions.subscribeFromPattern(topicsToSubscribe);
    // Unlike a real consumer, assign all matched partitions synchronously —
    // there is no broker-side rebalance in the mock.
    final Set<TopicPartition> assignedPartitions = new HashSet<>();
    for (final String topic : topicsToSubscribe) {
        for (final PartitionInfo info : this.partitions.get(topic)) {
            assignedPartitions.add(new TopicPartition(topic, info.partition()));
        }
    }
    subscriptions.assignFromSubscribed(assignedPartitions);
}
/**
 * Reads the committed offset (if any) for every partition of the given topic.
 *
 * @param client   consumer used to fetch partition metadata and committed offsets
 * @param topicStr topic whose committed offsets are collected
 * @return map of partition to committed offset; partitions without a commit are omitted
 */
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
    KafkaConsumer<String, byte[]> client, String topicStr) {
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    for (PartitionInfo info : client.partitionsFor(topicStr)) {
        TopicPartition tp = new TopicPartition(topicStr, info.partition());
        OffsetAndMetadata committed = client.committed(tp);
        // committed(...) returns null when nothing has been committed for this partition
        if (committed != null) {
            committedOffsets.put(tp, committed);
        }
    }
    return committedOffsets;
}
/**
 * Builds a metadata cache snapshot from the given cluster description.
 *
 * <p>If {@code clusterInstance} is supplied it is used directly; otherwise a
 * cluster view is derived from the other arguments.
 */
MetadataCache(String clusterId, List<Node> nodes, Collection<PartitionInfoAndEpoch> partitions,
              Set<String> unauthorizedTopics, Set<String> invalidTopics,
              Set<String> internalTopics, Node controller, Cluster clusterInstance) {
    this.clusterId = clusterId;
    this.nodes = nodes;
    this.controller = controller;
    this.unauthorizedTopics = unauthorizedTopics;
    this.invalidTopics = invalidTopics;
    this.internalTopics = internalTopics;

    // Index the partition metadata by (topic, partition) for O(1) lookup;
    // presized to avoid rehashing.
    this.metadataByPartition = new HashMap<>(partitions.size());
    for (PartitionInfoAndEpoch entry : partitions) {
        TopicPartition tp = new TopicPartition(entry.partitionInfo().topic(), entry.partitionInfo().partition());
        this.metadataByPartition.put(tp, entry);
    }

    if (clusterInstance != null) {
        this.clusterInstance = clusterInstance;
    } else {
        computeClusterView();
    }
}
@Override protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException { List<KafkaTopicPartition> partitions = new LinkedList<>(); try { for (String topic : topics) { for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) { partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition())); } } } catch (org.apache.kafka.common.errors.WakeupException e) { // rethrow our own wakeup exception throw new WakeupException(); } return partitions; }
@Override protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws AbstractPartitionDiscoverer.WakeupException { List<KafkaTopicPartition> partitions = new LinkedList<>(); try { for (String topic : topics) { for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) { partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition())); } } } catch (org.apache.kafka.common.errors.WakeupException e) { // rethrow our own wakeup exception throw new AbstractPartitionDiscoverer.WakeupException(); } return partitions; }
/** * Compute the partition for the given record. * * @param topic The topic name * @param key The key to partition on (or null if no key) * @param keyBytes serialized key to partition on (or null if no key) * @param value The value to partition on or null * @param valueBytes serialized value to partition on or null * @param cluster The current cluster metadata */ public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { // no partitions are available, give a non-available partition return Utils.toPositive(nextValue) % numPartitions; } } else { // hash the keyBytes to choose a partition return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } }
// Convert each Kafka PartitionInfo into a TopicPartitionInfo, resolving the
// leader node via the leader(...) helper and copying the replica / in-sync
// replica arrays into lists.
// NOTE(review): loop fragment — its closing brace and the declarations of
// `partitionInfos` / `partitions` are outside this view.
for (PartitionInfo partitionInfo : partitionInfos) {
    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(
        partitionInfo.partition(),
        leader(partitionInfo),
        Arrays.asList(partitionInfo.replicas()),
        Arrays.asList(partitionInfo.inSyncReplicas()));
    partitions.add(topicPartitionInfo);
/**
 * Triggers a preferred-replica leader election for the given partitions.
 *
 * @param zkClient          ZooKeeper client used to write the election request
 * @param partitionInfoList partitions whose preferred leaders should be elected
 */
private static void triggerPreferredLeaderElection(KafkaZkClient zkClient, List<PartitionInfo> partitionInfoList) {
    // Translate the Java partition descriptors into the mutable Scala set
    // expected by the election command.
    final scala.collection.mutable.HashSet<TopicPartition> scalaPartitions =
        new scala.collection.mutable.HashSet<>();
    partitionInfoList.forEach(info ->
        scalaPartitions.add(new TopicPartition(info.topic(), info.partition())));
    PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkClient, scalaPartitions);
}
@Test public void testNoEpoch() { metadata.update(emptyMetadataResponse(), 0L); MetadataResponse metadataResponse = TestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) -> new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.empty(), replicas, isr, offlineReplicas)); metadata.update(metadataResponse, 10L); TopicPartition tp = new TopicPartition("topic-1", 0); // no epoch assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent()); // still works assertTrue(metadata.partitionInfoIfCurrent(tp).isPresent()); assertEquals(metadata.partitionInfoIfCurrent(tp).get().partition(), 0); assertEquals(metadata.partitionInfoIfCurrent(tp).get().leader().id(), 0); }
/**
 * Maps a Kafka {@code PartitionInfo} onto the internal {@code KafkaPartition}
 * model, carrying over partition id, topic name, and leader identity/address.
 *
 * <p>NOTE(review): {@code partitionInfo.leader()} can be null when the
 * partition has no elected leader, which would NPE here — confirm callers
 * only pass partitions with a live leader.
 */
@Override
public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
    return new KafkaPartition.Builder().withId(partitionInfo.partition()).withTopicName(partitionInfo.topic())
        .withLeaderId(partitionInfo.leader().id())
        .withLeaderHostAndPort(partitionInfo.leader().host(), partitionInfo.leader().port()).build();
} };