/**
 * Returns the partition metadata for {@code topic} from the cached cluster view.
 *
 * @param topic the topic whose partitions are requested
 * @return the partitions currently known for the topic
 */
public List<PartitionInfo> partitionsFor(String topic) {
    Cluster snapshot = this.cluster;
    return snapshot.partitionsForTopic(topic);
}
/**
 * Get the partition metadata for the given topic. This can be used for custom partitioning.
 *
 * @param topic the topic whose partitions are requested; must not be {@code null}
 * @return the partitions for the topic after a (possibly blocking) metadata refresh
 * @throws AuthenticationException if authentication fails. See the exception for more details
 * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details
 * @throws InterruptException if the thread is interrupted while blocked
 * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms}
 * @throws KafkaException for all Kafka-related exceptions, including the case where this method
 *         is called after producer close
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    Objects.requireNonNull(topic, "topic cannot be null");
    try {
        // Block (bounded by max.block.ms) until metadata for the topic is available,
        // then answer from the refreshed cluster view.
        return waitOnMetadata(topic, null, maxBlockTimeMs).cluster.partitionsForTopic(topic);
    } catch (InterruptedException interrupted) {
        // Convert to Kafka's unchecked interrupt wrapper; it restores the interrupt flag.
        throw new InterruptException(interrupted);
    }
}
/**
 * Collects one {@link PartitionEntity} per partition in the cluster, canonicalized
 * through {@code _identityEntityMap} so equal entities share a single instance.
 *
 * @param cluster the cluster metadata to enumerate
 * @return the set of all partition entities in the cluster
 */
private Set<PartitionEntity> allPartitions(Cluster cluster) {
    Set<PartitionEntity> result = new HashSet<>();
    for (String topicName : cluster.topics()) {
        for (PartitionInfo info : cluster.partitionsForTopic(topicName)) {
            PartitionEntity candidate =
                new PartitionEntity(new TopicPartition(info.topic(), info.partition()));
            // Reuse a previously interned entity when one exists; otherwise intern this one.
            result.add(_identityEntityMap.computeIfAbsent(candidate, k -> candidate));
        }
    }
    return result;
}
/**
 * Finds the ids of all brokers that host at least one partition replica.
 *
 * @param kafkaCluster the cluster metadata to scan
 * @return the broker ids appearing in any partition's replica list
 */
private Set<Integer> brokersWithPartitions(Cluster kafkaCluster) {
    Set<Integer> brokerIds = new HashSet<>();
    for (String topicName : kafkaCluster.topics()) {
        for (PartitionInfo info : kafkaCluster.partitionsForTopic(topicName)) {
            for (Node replica : info.replicas()) {
                brokerIds.add(replica.id());
            }
        }
    }
    return brokerIds;
}
@Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { // Create an array to host the assignment of all the metric fetchers. List<Set<TopicPartition>> assignments = new ArrayList<>(); for (int i = 0; i < numMetricFetchers; i++) { assignments.add(new HashSet<>()); } int index = 0; // The total number of partitions that has been assigned. int totalPartitionAssigned = 0; for (String topic : cluster.topics()) { while (assignments.get(index % numMetricFetchers).size() > totalPartitionAssigned / numMetricFetchers) { index++; } Set<TopicPartition> assignmentForFetcher = assignments.get(index % numMetricFetchers); List<PartitionInfo> partitionsForTopic = cluster.partitionsForTopic(topic); for (PartitionInfo partitionInfo : partitionsForTopic) { assignmentForFetcher.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())); } totalPartitionAssigned += partitionsForTopic.size(); } if (LOG.isTraceEnabled()) { maybeDumpAssignments(assignments); } return assignments; }
/** * Compute the partition for the given record. * * @param topic The topic name * @param key The key to partition on (or null if no key) * @param keyBytes serialized key to partition on (or null if no key) * @param value The value to partition on or null * @param valueBytes serialized value to partition on or null * @param cluster The current cluster metadata */ public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { // no partitions are available, give a non-available partition return Utils.toPositive(nextValue) % numPartitions; } } else { // hash the keyBytes to choose a partition return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } }
// Waits until metadata reports at least one partition for the topic; each pass
// requests a refresh and blocks (up to maxWaitMs) for it to complete.
public void run() {
    try {
        while (metadata.fetch().partitionsForTopic(topic).isEmpty())
            metadata.awaitUpdate(metadata.requestUpdate(), maxWaitMs);
    } catch (Exception e) {
        // NOTE(review): failures are recorded in backgroundError instead of thrown;
        // presumably a foreground thread inspects it — confirm at the call site.
        backgroundError.set(e);
    }
} };
// Snapshot the partitions known for the topic, then convert each PartitionInfo
// into a TopicPartitionInfo (loop body continues beyond this excerpt).
List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topicName);
List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size());
for (PartitionInfo partitionInfo : partitionInfos) {
try {
    // Fast path: answer from cached metadata when it already knows the topic's partitions.
    Cluster cluster = this.metadata.fetch();
    List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
    if (!parts.isEmpty())
        return parts;
// Skip partitions that currently have no leader (loop continues beyond this excerpt).
for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) {
    if (partitionInfo.leader() == null) {
        continue;
/** * Gather the Kafka partition state within the given under replicated, offline, and other partitions (if verbose). * * @param underReplicatedPartitions state of under replicated partitions. * @param offlinePartitions state of offline partitions. * @param otherPartitions state of partitions other than offline or urp. * @param verbose true if requested to gather state of partitions other than offline or urp. */ private void populateKafkaPartitionState(Set<PartitionInfo> underReplicatedPartitions, Set<PartitionInfo> offlinePartitions, Set<PartitionInfo> otherPartitions, boolean verbose) { for (String topic : _kafkaCluster.topics()) { for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) { boolean isURP = partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length; if (isURP || verbose) { boolean isOffline = partitionInfo.inSyncReplicas().length == 0; if (isOffline) { offlinePartitions.add(partitionInfo); } else if (isURP) { underReplicatedPartitions.add(partitionInfo); } else { // verbose -- other otherPartitions.add(partitionInfo); } } } } }
// Only compute a partition when metadata already knows the topic; otherwise
// `partition` keeps its prior value (assigned before this excerpt).
if (!this.cluster.partitionsForTopic(record.topic()).isEmpty())
    partition = partition(record, this.cluster);
TopicPartition topicPartition = new TopicPartition(record.topic(), partition);
/** * computes partition for given record. */ private int partition(ProducerRecord<K, V> record, Cluster cluster) { Integer partition = record.partition(); String topic = record.topic(); if (partition != null) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); // they have given us a partition, use it if (partition < 0 || partition >= numPartitions) throw new IllegalArgumentException("Invalid partition given with record: " + partition + " is not in the range [0..." + numPartitions + "]."); return partition; } byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key()); byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value()); return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster); }
        return true;
// Compare each previously-known partition against the current metadata; a leader
// or replica-list change is significant (excerpt is mid-method, braces unbalanced here).
for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) {
    PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition()));
    if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) {
// Build a topic -> partition-list snapshot of the cluster metadata
// (method signature is outside this excerpt).
HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
for (String topic : cluster.topics())
    topicsPartitionInfos.put(topic, cluster.partitionsForTopic(topic));
return topicsPartitionInfos;
// Verify the partition counts the fixture configured for each topic.
// NOTE(review): JUnit's assertEquals convention is (expected, actual); the arguments
// here look swapped, which would make failure messages misleading — confirm intent.
assertEquals(cluster.partitionsForTopic("topic1").size(), 2);
assertEquals(cluster.partitionsForTopic("topic2").size(), 3);
@Override public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = roundRobin.getAndIncrement(); return Utils.toPositive(nextValue) % numPartitions; } else { // hash the keyBytes to choose a partition return Utils.toPositive(xxHasher.hash(keyBytes, 0, keyBytes.length, SEED)) % numPartitions; } }
/**
 * Builds a mocked {@code Cluster} that reports five placeholder partitions for
 * any topic; only the partition count matters to the tests using this fixture.
 */
@Before
public void setup() {
    cluster = PowerMockito.mock(Cluster.class);
    List<PartitionInfo> mockPartitions = new ArrayList<>();
    for (int count = 0; count < 5; count++) {
        // Placeholder PartitionInfo: topic/leader/replicas/isr are unused by the tests.
        mockPartitions.add(new PartitionInfo(null, 1, null, null, null));
    }
    when(cluster.partitionsForTopic(anyString())).thenReturn(mockPartitions);
}
// Records the source topics and finds the topic with the most partitions
// (maxPartitions / topicWithMostPartitions are fields declared outside this excerpt).
private SourceTopicsInfo(final List<String> sourceTopics) {
    this.sourceTopics = sourceTopics;
    for (final String topic : sourceTopics) {
        final List<PartitionInfo> partitions = clusterMetadata.partitionsForTopic(topic);
        if (partitions.size() > maxPartitions) {
            maxPartitions = partitions.size();
            // partitions.get(0).topic() is the canonical topic name from metadata.
            topicWithMostPartitions = partitions.get(0).topic();
        }
    }
} }
/**
 * Returns the partitions for {@code topic} from the locally cached cluster metadata.
 *
 * @param topic the topic whose partitions are requested
 * @return the partitions currently known for the topic
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    Cluster snapshot = this._cluster;
    return snapshot.partitionsForTopic(topic);
}