@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    // Rebuild the matched-topic cache from scratch on every invocation.
    topics.clear();
    Set<TopicPartition> subscribedPartitions = new HashSet<>();
    for (Map.Entry<String, List<PartitionInfo>> topicEntry : consumer.listTopics().entrySet()) {
        // Only topics whose name matches the configured pattern are subscribed.
        if (!pattern.matcher(topicEntry.getKey()).matches()) {
            continue;
        }
        for (PartitionInfo info : topicEntry.getValue()) {
            subscribedPartitions.add(new TopicPartition(info.topic(), info.partition()));
            topics.add(info.topic());
        }
    }
    return subscribedPartitions;
}
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    // Resolve every cached topic name to its current partitions.
    Set<TopicPartition> subscribedPartitions = new HashSet<>();
    for (String topic : topics) {
        List<PartitionInfo> infos = consumer.partitionsFor(topic);
        if (infos == null) {
            // partitionsFor() yields null when the topic is unknown to the broker.
            LOG.warn("Topic {} not found, skipping addition of the topic", topic);
            continue;
        }
        for (PartitionInfo info : infos) {
            subscribedPartitions.add(new TopicPartition(info.topic(), info.partition()));
        }
    }
    return subscribedPartitions;
}
// Build the per-(topic,partition), per-topic, and per-leader-node partition indexes.
// A partition is added to the "available" and per-node maps only when it has a live
// leader (p.leader() != null). merge(...) with Utils::concatListsUnmodifiable appends
// to an existing unmodifiable list without mutating it.
// NOTE(review): fragment — the enclosing method's start/end and loop close are not
// visible in this chunk, so this span is left byte-identical.
Map<Integer, List<PartitionInfo>> tmpPartitionsByNode = new HashMap<>(); for (PartitionInfo p : partitions) { tmpPartitionsByTopicPartition.put(new TopicPartition(p.topic(), p.partition()), p); tmpPartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable); if (p.leader() != null) { tmpAvailablePartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable); tmpPartitionsByNode.merge(p.leader().id(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
/**
 * Returns the replication factor shared by all partitions of a topic.
 *
 * @param partitionInfoList partitions of a single topic; must be non-empty
 * @return the common replication factor, or {@code -1} if the partitions
 *         disagree on their replication factor
 * @throws IllegalArgumentException if {@code partitionInfoList} is empty
 */
static int getReplicationFactor(List<PartitionInfo> partitionInfoList) {
    if (partitionInfoList.isEmpty()) {
        // IllegalArgumentException is-a RuntimeException, so existing callers
        // that catch RuntimeException keep working.
        throw new IllegalArgumentException("Partition list is empty");
    }
    int replicationFactor = partitionInfoList.get(0).replicas().length;
    for (PartitionInfo partitionInfo : partitionInfoList) {
        if (replicationFactor != partitionInfo.replicas().length) {
            // Parameterized logging: no eager string concatenation on the hot path.
            LOG.warn("Partitions of the topic {} have different replication factor",
                     partitionInfoList.get(0).topic());
            return -1;
        }
    }
    return replicationFactor;
}
private void prepareConsumers() {
    int consumerCount = _consumers.size();
    // One (initially empty) partition list per consumer.
    List<List<TopicPartition>> perConsumerAssignment = new ArrayList<>();
    for (int i = 0; i < consumerCount; i++) {
        perConsumerAssignment.add(new ArrayList<>());
    }
    // Round-robin the partitions of both sample-store topics across all consumers.
    int next = 0;
    for (String topic : Arrays.asList(_partitionMetricSampleStoreTopic, _brokerMetricSampleStoreTopic)) {
        for (PartitionInfo info : _consumers.get(0).partitionsFor(topic)) {
            perConsumerAssignment.get(next % consumerCount)
                                 .add(new TopicPartition(info.topic(), info.partition()));
            next++;
        }
    }
    for (int i = 0; i < consumerCount; i++) {
        _consumers.get(i).assign(perConsumerAssignment.get(i));
    }
}
// Builds an immutable snapshot of cluster metadata. When a prebuilt Cluster view is
// supplied it is used as-is; otherwise one is derived from the partition metadata.
MetadataCache(String clusterId,
              List<Node> nodes,
              Collection<PartitionInfoAndEpoch> partitions,
              Set<String> unauthorizedTopics,
              Set<String> invalidTopics,
              Set<String> internalTopics,
              Node controller,
              Cluster clusterInstance) {
    this.clusterId = clusterId;
    this.nodes = nodes;
    this.unauthorizedTopics = unauthorizedTopics;
    this.invalidTopics = invalidTopics;
    this.internalTopics = internalTopics;
    this.controller = controller;
    // Pre-size the map: one entry per partition.
    this.metadataByPartition = new HashMap<>(partitions.size());
    for (PartitionInfoAndEpoch p : partitions) {
        TopicPartition tp = new TopicPartition(p.partitionInfo().topic(), p.partitionInfo().partition());
        this.metadataByPartition.put(tp, p);
    }
    if (clusterInstance != null) {
        this.clusterInstance = clusterInstance;
    } else {
        computeClusterView();
    }
}
@Override public String formatAsText() { try (KafkaConsumer consumer = new KafkaConsumer(consumerProperties) { }) { //noinspection unchecked List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic); List<TopicPartition> topicPartitions = partitionsInfo.stream() .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())) .collect(Collectors.toList()); Map endOffsets = consumer.endOffsets(topicPartitions); Map startOffsets = consumer.beginningOffsets(topicPartitions); return partitionsInfo.stream() .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]", partitionInfo.toString(), startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())), endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())))) .collect(Collectors.joining("\n")); } catch (Exception e) { return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]", topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage()); } } }
@Override protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException { List<KafkaTopicPartition> partitions = new LinkedList<>(); try { for (String topic : topics) { for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) { partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition())); } } } catch (org.apache.kafka.common.errors.WakeupException e) { // rethrow our own wakeup exception throw new WakeupException(); } return partitions; }
@Override protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws AbstractPartitionDiscoverer.WakeupException { List<KafkaTopicPartition> partitions = new LinkedList<>(); try { for (String topic : topics) { for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) { partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition())); } } } catch (org.apache.kafka.common.errors.WakeupException e) { // rethrow our own wakeup exception throw new AbstractPartitionDiscoverer.WakeupException(); } return partitions; }
// Collects one PartitionEntity per partition in the cluster, interning each entity
// through _identityEntityMap so equal entities always share a single instance.
private Set<PartitionEntity> allPartitions(Cluster cluster) {
    Set<PartitionEntity> result = new HashSet<>();
    for (String topicName : cluster.topics()) {
        for (PartitionInfo info : cluster.partitionsForTopic(topicName)) {
            PartitionEntity candidate =
                new PartitionEntity(new TopicPartition(info.topic(), info.partition()));
            result.add(_identityEntityMap.computeIfAbsent(candidate, k -> candidate));
        }
    }
    return result;
}
// Converts partition metadata into JSON-friendly maps: topic, partition id, leader id
// (-1 when leaderless), replica ids, in-sync ids, and the out-of-sync difference.
private List<Object> getJsonPartitions(Set<PartitionInfo> partitions) {
    List<Object> jsonPartitions = new ArrayList<>();
    for (PartitionInfo info : partitions) {
        Set<Integer> replicas =
            Arrays.stream(info.replicas()).map(Node::id).collect(Collectors.toSet());
        Set<Integer> inSync =
            Arrays.stream(info.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
        // Out-of-sync = all replicas minus the in-sync ones.
        Set<Integer> outOfSync = new HashSet<>(replicas);
        outOfSync.removeAll(inSync);
        Map<String, Object> record = new HashMap<>();
        record.put(TOPIC, info.topic());
        record.put(PARTITION, info.partition());
        record.put(LEADER, info.leader() == null ? -1 : info.leader().id());
        record.put(REPLICAS, replicas);
        record.put(IN_SYNC, inSync);
        record.put(OUT_OF_SYNC, outOfSync);
        jsonPartitions.add(record);
    }
    return jsonPartitions;
}
// Appends one fixed-width row per partition to sb:
// topic, partition, leader id (-1 when leaderless), replicas, ISR, out-of-sync replicas.
private void writeKafkaClusterState(StringBuilder sb, SortedSet<PartitionInfo> partitions, int topicNameLength) {
    // The topic column width is dynamic; the rest of the row format is fixed.
    final String rowFormat = "%" + topicNameLength + "s%10s%10s%40s%40s%30s%n";
    for (PartitionInfo info : partitions) {
        Set<String> replicas =
            Arrays.stream(info.replicas()).map(Node::idString).collect(Collectors.toSet());
        Set<String> inSync =
            Arrays.stream(info.inSyncReplicas()).map(Node::idString).collect(Collectors.toSet());
        Set<String> outOfSync = new HashSet<>(replicas);
        outOfSync.removeAll(inSync);
        sb.append(String.format(rowFormat,
                                info.topic(),
                                info.partition(),
                                info.leader() == null ? -1 : info.leader().id(),
                                replicas,
                                inSync,
                                outOfSync));
    }
}
// Distributes all partitions in the cluster across numMetricFetchers assignment sets,
// keeping every partition of a topic on the same fetcher. The inner while-loop skips
// fetchers whose assignment is already above the running average
// (totalPartitionAssigned / numMetricFetchers), so each new topic lands on an
// under-loaded fetcher. `index` only ever advances, wrapping via the modulo.
// NOTE(review): the interplay between `index`, the average, and topic order is
// load-bearing — statements here must not be reordered.
@Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { // Create an array to host the assignment of all the metric fetchers. List<Set<TopicPartition>> assignments = new ArrayList<>(); for (int i = 0; i < numMetricFetchers; i++) { assignments.add(new HashSet<>()); } int index = 0; // The total number of partitions that has been assigned. int totalPartitionAssigned = 0; for (String topic : cluster.topics()) { while (assignments.get(index % numMetricFetchers).size() > totalPartitionAssigned / numMetricFetchers) { index++; } Set<TopicPartition> assignmentForFetcher = assignments.get(index % numMetricFetchers); List<PartitionInfo> partitionsForTopic = cluster.partitionsForTopic(topic); for (PartitionInfo partitionInfo : partitionsForTopic) { assignmentForFetcher.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())); } totalPartitionAssigned += partitionsForTopic.size(); } if (LOG.isTraceEnabled()) { maybeDumpAssignments(assignments); } return assignments; }
/**
 * Counts, for every broker, how many leader partitions it hosts per topic. Useful for
 * deriving partition-level IO from topic-level IO on a broker.
 * TODO: create open source KIP to provide per partition IO metrics.
 *
 * @param cluster the cluster to inspect
 * @return broker id -> (topic name -> number of leader partitions on that broker)
 */
private Map<Integer, Map<String, Integer>> leaderDistributionStats(Cluster cluster) {
    Map<Integer, Map<String, Integer>> leadersByBroker = new HashMap<>();
    for (Node broker : cluster.nodes()) {
        Map<String, Integer> leaderCountByTopic = new HashMap<>();
        leadersByBroker.put(broker.id(), leaderCountByTopic);
        // partitionsForNode() lists the partitions this broker currently leads.
        for (PartitionInfo info : cluster.partitionsForNode(broker.id())) {
            leaderCountByTopic.merge(info.topic(), 1, Integer::sum);
        }
    }
    return leadersByBroker;
}
// Writes the preferred-replica-election data for the given partitions to ZooKeeper.
// The command API expects a Scala mutable set, so the Java list is converted first.
private static void triggerPreferredLeaderElection(KafkaZkClient zkClient, List<PartitionInfo> partitionInfoList) {
    scala.collection.mutable.HashSet<TopicPartition> partitionSet =
        new scala.collection.mutable.HashSet<>();
    for (PartitionInfo info : partitionInfoList) {
        partitionSet.add(new TopicPartition(info.topic(), info.partition()));
    }
    PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkClient, partitionSet);
}
@Override public List<WorkUnit> getWorkunits(SourceState state) { Config config = ConfigUtils.propertiesToConfig(state.getProperties()); Consumer<String, byte[]> consumer = getKafkaConsumer(config); LOG.debug("Consumer is {}", consumer); String topic = ConfigUtils.getString(config, TOPIC_WHITELIST, StringUtils.EMPTY); // TODO: fix this to use the new API when KafkaWrapper is fixed List<WorkUnit> workUnits = new ArrayList<WorkUnit>(); List<PartitionInfo> topicPartitions; topicPartitions = consumer.partitionsFor(topic); LOG.info("Partition count is {}", topicPartitions.size()); for (PartitionInfo topicPartition : topicPartitions) { Extract extract = this.createExtract(DEFAULT_TABLE_TYPE, DEFAULT_NAMESPACE_NAME, topicPartition.topic()); LOG.info("Partition info is {}", topicPartition); WorkUnit workUnit = WorkUnit.create(extract); setTopicNameInState(workUnit, topicPartition.topic()); workUnit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, topicPartition.topic()); setPartitionId(workUnit, topicPartition.partition()); workUnits.add(workUnit); } return workUnits; }
public TopicStreamWriter( final SchemaRegistryClient schemaRegistryClient, final Map<String, Object> consumerProperties, final String topicName, final long interval, final Duration disconnectCheckInterval, final boolean fromBeginning ) { this.schemaRegistryClient = schemaRegistryClient; this.topicName = topicName; this.messagesWritten = 0; this.disconnectCheckInterval = Objects .requireNonNull(disconnectCheckInterval, "disconnectCheckInterval"); this.topicConsumer = new KafkaConsumer<>( consumerProperties, new StringDeserializer(), new BytesDeserializer() ); final List<TopicPartition> topicPartitions = topicConsumer.partitionsFor(topicName) .stream() .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())) .collect(Collectors.toList()); topicConsumer.assign(topicPartitions); if (fromBeginning) { topicConsumer.seekToBeginning(topicPartitions); } this.interval = interval; }
// Wires a fresh consumer (manually assigned to every partition of the topic, no
// consumer group) to the subscriber via a PrintSubscription.
@Override
public void subscribe(final Flow.Subscriber<Collection<String>> subscriber) {
  final KafkaConsumer<String, Bytes> topicConsumer = new KafkaConsumer<>(
      consumerProperties,
      new StringDeserializer(),
      new BytesDeserializer()
  );
  log.info("Running consumer for topic {}", topicName);
  final List<TopicPartition> assignedPartitions = topicConsumer.partitionsFor(topicName)
      .stream()
      .map(info -> new TopicPartition(info.topic(), info.partition()))
      .collect(Collectors.toList());
  topicConsumer.assign(assignedPartitions);
  if (fromBeginning) {
    topicConsumer.seekToBeginning(assignedPartitions);
  }
  subscriber.onSubscribe(new PrintSubscription(
      subscriber,
      topicConsumer,
      new RecordFormatter(schemaRegistryClient, topicName)
  ));
}
// Fragment of a drain loop: visits partitions round-robin, resuming at the
// persisted `drainIndex` and advancing it modulo the partition count so the next
// call starts where this one left off (keeps draining fair across partitions).
// NOTE(review): the enclosing do/while condition and loop body tail are not
// visible in this chunk, so this span is left byte-identical.
do { PartitionInfo part = parts.get(drainIndex); TopicPartition tp = new TopicPartition(part.topic(), part.partition()); this.drainIndex = (this.drainIndex + 1) % parts.size();
/**
 * Maps Kafka partition metadata to a {@link KafkaPartition}.
 * Leader fields are only populated when the partition currently has a leader —
 * {@code PartitionInfo.leader()} is null for a leaderless partition, and the
 * original unconditional dereference would throw an NPE in that case.
 */
@Override
public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
    KafkaPartition.Builder builder = new KafkaPartition.Builder()
        .withId(partitionInfo.partition())
        .withTopicName(partitionInfo.topic());
    Node leader = partitionInfo.leader();
    if (leader != null) {
        builder.withLeaderId(leader.id())
               .withLeaderHostAndPort(leader.host(), leader.port());
    }
    return builder.build();
}
};