/**
 * Counts the total number of partitions across all topics in the given cluster.
 *
 * @param cluster the cluster metadata to inspect.
 * @return the sum of partition counts over every topic in the cluster.
 */
public static int totalNumPartitions(Cluster cluster) {
    return cluster.topics().stream().mapToInt(cluster::partitionCountForTopic).sum();
}
// NOTE(review): fragment — the trailing "});" closes an enclosing anonymous listener and a
// method call that both start outside this view. On each metadata update the listener
// replaces the cached topic set with the topics from the latest cluster metadata.
@Override public void onMetadataUpdate(Cluster cluster, Set<String> unavailableTopics) { topics.clear(); topics.addAll(cluster.topics()); } });
// NOTE(review): fragment — the trailing "};" closes an enclosing anonymous class whose
// declaration starts outside this view. On each metadata update the listener replaces
// the cached topic set with the topics from the latest cluster metadata.
@Override public void onMetadataUpdate(Cluster cluster, Set<String> unavailableTopics) { topics.clear(); topics.addAll(cluster.topics()); } };
public void updatePatternSubscription(Cluster cluster) { final Set<String> topicsToSubscribe = new HashSet<>(); for (String topic : cluster.topics()) if (subscriptions.subscribedPattern().matcher(topic).matches() && !(excludeInternalTopics && cluster.internalTopics().contains(topic))) topicsToSubscribe.add(topic); subscriptions.subscribeFromPattern(topicsToSubscribe); // note we still need to update the topics contained in the metadata. Although we have // specified that all topics should be fetched, only those set explicitly will be retained metadata.setTopics(subscriptions.groupSubscription()); }
/**
 * Builds the set of {@link PartitionEntity} instances for every partition in the cluster,
 * canonicalizing each entity through the identity map so repeated lookups share a single
 * instance per partition.
 *
 * @param cluster the cluster whose partitions are enumerated.
 * @return the canonical partition entities for all partitions of all topics.
 */
private Set<PartitionEntity> allPartitions(Cluster cluster) {
    Set<PartitionEntity> entities = new HashSet<>();
    for (String topicName : cluster.topics()) {
        for (PartitionInfo info : cluster.partitionsForTopic(topicName)) {
            TopicPartition topicPartition = new TopicPartition(info.topic(), info.partition());
            PartitionEntity candidate = new PartitionEntity(topicPartition);
            // Reuse the canonical instance if one is already registered for this partition.
            entities.add(_identityEntityMap.computeIfAbsent(candidate, k -> candidate));
        }
    }
    return entities;
}
// NOTE(review): fragment — the final "}" closes an enclosing (anonymous) class that starts
// outside this view. Each run accumulates the topics from a fresh metadata refresh; every
// 10th refresh it retains only the recently seen topics in the sample aggregator and then
// resets the accumulated topic set.
@Override public void run() { _allTopics.addAll(_metadataClient.refreshMetadata().cluster().topics()); _refreshCount++; if (_refreshCount % 10 == 0) { _partitionMetricSampleAggregator.retainEntityGroup(_allTopics); _allTopics.clear(); } } }
/**
 * Collects the ids of all brokers that host at least one partition replica.
 *
 * @param kafkaCluster the cluster metadata to scan.
 * @return the id of every broker that appears in any partition's replica list.
 */
private Set<Integer> brokersWithPartitions(Cluster kafkaCluster) {
    Set<Integer> brokerIds = new HashSet<>();
    for (String topicName : kafkaCluster.topics()) {
        for (PartitionInfo partitionInfo : kafkaCluster.partitionsForTopic(topicName)) {
            // Every replica (leader or follower) counts as hosting a partition.
            for (Node replica : partitionInfo.replicas()) {
                brokerIds.add(replica.id());
            }
        }
    }
    return brokerIds;
}
/**
 * Assigns every partition in the cluster to one of {@code numMetricFetchers} fetchers,
 * keeping all partitions of a topic on the same fetcher while balancing partition counts.
 *
 * @param cluster           the cluster whose partitions are assigned.
 * @param numMetricFetchers the number of metric fetchers to spread partitions over.
 * @return one set of topic-partitions per fetcher, indexed by fetcher.
 */
@Override
public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) {
    // Create an array to host the assignment of all the metric fetchers.
    List<Set<TopicPartition>> assignments = new ArrayList<>();
    for (int i = 0; i < numMetricFetchers; i++) {
        assignments.add(new HashSet<>());
    }
    int index = 0;
    // The total number of partitions that has been assigned.
    int totalPartitionAssigned = 0;
    for (String topic : cluster.topics()) {
        // Skip fetchers that already hold more than the current average share
        // (integer division of assigned partitions by fetcher count), so whole
        // topics land on the least-loaded fetcher.
        while (assignments.get(index % numMetricFetchers).size() > totalPartitionAssigned / numMetricFetchers) {
            index++;
        }
        Set<TopicPartition> assignmentForFetcher = assignments.get(index % numMetricFetchers);
        List<PartitionInfo> partitionsForTopic = cluster.partitionsForTopic(topic);
        for (PartitionInfo partitionInfo : partitionsForTopic) {
            assignmentForFetcher.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }
        totalPartitionAssigned += partitionsForTopic.size();
    }
    if (LOG.isTraceEnabled()) {
        maybeDumpAssignments(assignments);
    }
    return assignments;
}
// NOTE(review): fragment — the leading "continue;", the enclosing loop, and the
// "future"/"topicName" variables all come from outside this view. When the topic is
// absent from the cluster metadata, the per-topic future is failed with
// UnknownTopicOrPartitionException and the loop moves on to the next topic.
continue; if (!cluster.topics().contains(topicName)) { future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found.")); continue;
// NOTE(review): fragment — the method signature begins before this view and the body is
// cut off mid-if. It iterates every partition of every topic in the cluster and branches
// on partitions whose leader is null (i.e. currently leaderless); the handling of that
// case is outside this view.
Map<Integer, Integer> replicaCountByBrokerId) { for (String topic : _kafkaCluster.topics()) { for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) { if (partitionInfo.leader() == null) {
// Column width for printing topic names: the longest topic name in the cluster
// (falling back to 20 when there are no topics) plus 5 characters of padding.
int topicNameLength = _kafkaCluster.topics().stream().mapToInt(String::length).max().orElse(20) + 5;
/** * Gather the Kafka partition state within the given under replicated, offline, and other partitions (if verbose). * * @param underReplicatedPartitions state of under replicated partitions. * @param offlinePartitions state of offline partitions. * @param otherPartitions state of partitions other than offline or urp. * @param verbose true if requested to gather state of partitions other than offline or urp. */ private void populateKafkaPartitionState(Set<PartitionInfo> underReplicatedPartitions, Set<PartitionInfo> offlinePartitions, Set<PartitionInfo> otherPartitions, boolean verbose) { for (String topic : _kafkaCluster.topics()) { for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) { boolean isURP = partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length; if (isURP || verbose) { boolean isOffline = partitionInfo.inSyncReplicas().length == 0; if (isOffline) { offlinePartitions.add(partitionInfo); } else if (isURP) { underReplicatedPartitions.add(partitionInfo); } else { // verbose -- other otherPartitions.add(partitionInfo); } } } } }
// NOTE(review): fragment — topicsPartitionInfos is declared (and the enclosing method
// begins) outside this view. Maps every topic in the cluster to its partition metadata
// and returns the populated map.
for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.partitionsForTopic(topic)); return topicsPartitionInfos;
// JUnit's assertEquals takes the EXPECTED value first; the original calls passed the
// actual value in the expected slot, which produces misleading failure messages
// (e.g. "expected <actualSize> but was <3>"). Swapped to the conventional order.
assertEquals(3, cluster.topics().size());
assertEquals(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME), cluster.internalTopics());
// NOTE(review): fragment — the statement before the constructor call and the loop's
// closing brace are outside this view. The loop polls metadata (sleeping 10 ms per
// iteration) and refreshes until all NUM_TOPICS topics become visible.
new LoadMonitorTaskRunner(config, fetcherManager, mockPartitionMetricSampleAggregator, mockBrokerMetricSampleAggregator, metadataClient, TIME); while (metadata.fetch().topics().size() < NUM_TOPICS) { Thread.sleep(10); metadataClient.refreshMetadata();
// NOTE(review): fragment — cut off mid-loop. Unlike the similar wait loop elsewhere in
// this file, this one spins without sleeping; the literal 100 presumably mirrors a
// NUM_TOPICS-style constant — TODO confirm, and consider a short sleep per iteration.
new LoadMonitorTaskRunner(config, fetcherManager, mockMetricSampleAggregator, mockBrokerMetricSampleAggregator, metadataClient, TIME); while (metadata.fetch().topics().size() < 100) { metadataClient.refreshMetadata();
/**
 * Reports whether cluster metadata has been received and contains at least one topic.
 *
 * @return {@code true} once non-empty cluster metadata is available.
 */
private boolean isInitialized() {
    if (clusterMetadata == null) {
        return false;
    }
    return !clusterMetadata.topics().isEmpty();
}
/**
 * Subscribes to every currently known topic whose name matches the given pattern.
 *
 * @param pattern  regex matched against each topic name in the current cluster metadata.
 * @param callback listener notified on rebalance events.
 */
@Override
public void subscribe(Pattern pattern, ConsumerRebalanceListener callback) {
    acquire();
    try {
        Set<String> matchingTopics = new HashSet<>();
        for (String topicName : cluster.cluster().topics()) {
            if (pattern.matcher(topicName).matches()) {
                matchingTopics.add(topicName);
            }
        }
        subscribe(matchingTopics, callback);
    } finally {
        release();
    }
}
/**
 * Returns partition metadata for every topic known to the current cluster.
 *
 * @return a map from topic name to that topic's partition info list.
 */
@Override
public Map<String, List<PartitionInfo>> listTopics() {
    acquire();
    try {
        Map<String, List<PartitionInfo>> partitionsByTopic = new HashMap<>();
        // Re-reads cluster.cluster() per lookup, mirroring the original access pattern.
        for (String topicName : cluster.cluster().topics()) {
            partitionsByTopic.put(topicName, cluster.cluster().partitionsForTopic(topicName));
        }
        return partitionsByTopic;
    } finally {
        release();
    }
}