/**
 * Get the current leader for the given topic-partition
 * @param topicPartition The topic and partition we want to know the leader for
 * @return The node that is the leader for this topic-partition, or null if there is currently no leader
 */
public Node leaderFor(TopicPartition topicPartition) {
    PartitionInfo info = partitionsByTopicPartition.get(topicPartition);
    if (info == null)
        return null;
    else
        return info.leader();
}
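// Illustrative usage sketch, not part of the snippet above: how a caller might consume
// leaderFor(). The 'cluster' instance and the topic/partition values are assumptions made
// for this example; the important point is that a null leader must be handled explicitly.
TopicPartition tp = new TopicPartition("example-topic", 0);
Node leader = cluster.leaderFor(tp);
if (leader == null) {
    // No leader is currently known for this partition; a caller would typically request
    // a metadata refresh and retry rather than dereferencing the result.
} else {
    // Safe to address the leader, e.g. via leader.id(), leader.host() and leader.port().
}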
static boolean someBrokerNotElectedLeader(List<PartitionInfo> partitionInfoList, Collection<Broker> brokers) {
    Set<Integer> brokersNotElectedLeader = new HashSet<>(brokers.size());
    for (Broker broker : brokers)
        brokersNotElectedLeader.add(broker.id());
    for (PartitionInfo partitionInfo : partitionInfoList) {
        if (partitionInfo.leader() != null)
            brokersNotElectedLeader.remove(partitionInfo.leader().id());
    }
    return !brokersNotElectedLeader.isEmpty();
}
private static boolean leaderChanged(PartitionInfo prevPartInfo, PartitionInfo currPartInfo) {
    Node prevLeader = prevPartInfo.leader();
    Node currLeader = currPartInfo.leader();
    // The leader is considered unchanged only when both are null or both are non-null with the same broker id.
    return !(prevLeader == null && currLeader == null)
        && !(prevLeader != null && currLeader != null && prevLeader.id() == currLeader.id());
}
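// Illustrative usage sketch (not from the original source): comparing the same partition's
// metadata before and after a refresh. 'prevCluster', 'currCluster' and 'tp' are names
// assumed only for this example.
PartitionInfo prev = prevCluster.partition(tp);
PartitionInfo curr = currCluster.partition(tp);
if (prev != null && curr != null && leaderChanged(prev, curr)) {
    // Leadership moved, was lost, or was newly elected; requests that were routed to the
    // previous leader would typically be retried against curr.leader() after a refresh.
}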
    metadata.requestUpdate();
    partitionsToRetry.add(tp);
} else if (currentInfo.get().leader() == null) {
    log.debug("Leader for partition {} is unavailable for fetching offset", tp);
    metadata.requestUpdate();
    partitionsToRetry.add(tp);
} else if (client.isUnavailable(currentInfo.get().leader())) {
    client.maybeThrowAuthFailure(currentInfo.get().leader());
    // The connection to the leader has failed; retry this partition after the reconnect backoff expires.
    log.debug("Leader {} for partition {} is unavailable for fetching offset",
            currentInfo.get().leader(), tp);
    partitionsToRetry.add(tp);
} else {
    Node node = currentInfo.get().leader();
    Map<TopicPartition, ListOffsetRequest.PartitionData> topicData =
            timestampsToSearchByNode.computeIfAbsent(node, n -> new HashMap<>());
tmpPartitionsByTopicPartition.put(new TopicPartition(p.topic(), p.partition()), p);
tmpPartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
if (p.leader() != null) {
    tmpAvailablePartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
    tmpPartitionsByNode.merge(p.leader().id(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
}
private List<Object> getJsonPartitions(Set<PartitionInfo> partitions) {
    List<Object> partitionList = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitions) {
        Set<Integer> replicas =
                Arrays.stream(partitionInfo.replicas()).map(Node::id).collect(Collectors.toSet());
        Set<Integer> inSyncReplicas =
                Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
        Set<Integer> outOfSyncReplicas = new HashSet<>(replicas);
        outOfSyncReplicas.removeAll(inSyncReplicas);

        Map<String, Object> recordMap = new HashMap<>();
        recordMap.put(TOPIC, partitionInfo.topic());
        recordMap.put(PARTITION, partitionInfo.partition());
        recordMap.put(LEADER, partitionInfo.leader() == null ? -1 : partitionInfo.leader().id());
        recordMap.put(REPLICAS, replicas);
        recordMap.put(IN_SYNC, inSyncReplicas);
        recordMap.put(OUT_OF_SYNC, outOfSyncReplicas);
        partitionList.add(recordMap);
    }
    return partitionList;
}
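// Tiny worked example of the out-of-sync computation above (values are illustrative only):
// replicas = {1, 2, 3}, inSyncReplicas = {1, 3}  =>  outOfSyncReplicas = {2}.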
private void writeKafkaClusterState(StringBuilder sb, SortedSet<PartitionInfo> partitions, int topicNameLength) {
    for (PartitionInfo partitionInfo : partitions) {
        Set<String> replicas =
                Arrays.stream(partitionInfo.replicas()).map(Node::idString).collect(Collectors.toSet());
        Set<String> inSyncReplicas =
                Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::idString).collect(Collectors.toSet());
        Set<String> outOfSyncReplicas = new HashSet<>(replicas);
        outOfSyncReplicas.removeAll(inSyncReplicas);

        sb.append(String.format("%" + topicNameLength + "s%10s%10s%40s%40s%30s%n",
                partitionInfo.topic(),
                partitionInfo.partition(),
                partitionInfo.leader() == null ? -1 : partitionInfo.leader().id(),
                replicas,
                inSyncReplicas,
                outOfSyncReplicas));
    }
}
@Override
public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
    // Note: assumes the partition currently has an elected leader; leader() would be null otherwise.
    return new KafkaPartition.Builder()
            .withId(partitionInfo.partition())
            .withTopicName(partitionInfo.topic())
            .withLeaderId(partitionInfo.leader().id())
            .withLeaderHostAndPort(partitionInfo.leader().host(), partitionInfo.leader().port())
            .build();
}
};
@Test
public void testNoEpoch() {
    metadata.update(emptyMetadataResponse(), 0L);
    MetadataResponse metadataResponse = TestUtils.metadataUpdateWith("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1),
            (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
                    new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.empty(),
                            replicas, isr, offlineReplicas));
    metadata.update(metadataResponse, 10L);

    TopicPartition tp = new TopicPartition("topic-1", 0);

    // no epoch
    assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent());

    // still works
    assertTrue(metadata.partitionInfoIfCurrent(tp).isPresent());
    assertEquals(metadata.partitionInfoIfCurrent(tp).get().partition(), 0);
    assertEquals(metadata.partitionInfoIfCurrent(tp).get().leader().id(), 0);
}
@Override
public Samples getSamples(Cluster cluster, Set<TopicPartition> assignedPartitions, long startTime, long endTime,
                          SamplingMode mode, MetricDef metricDef) throws MetricSamplingException {
    if (_exceptionsLeft > 0) {
        _exceptionsLeft--;
        throw new MetricSamplingException("Error");
    }
    Set<PartitionMetricSample> partitionMetricSamples = new HashSet<>(assignedPartitions.size());
    for (TopicPartition tp : assignedPartitions) {
        PartitionMetricSample sample = new PartitionMetricSample(cluster.partition(tp).leader().id(), tp);
        long now = TIME.milliseconds();
        for (Resource resource : Resource.cachedValues()) {
            for (MetricInfo metricInfo : KafkaMetricDef.resourceToMetricInfo(resource)) {
                sample.record(metricInfo, now);
            }
        }
        sample.close(now);
        partitionMetricSamples.add(sample);
    }
    return new Samples(partitionMetricSamples, Collections.emptySet());
}
clusterModel.handleDeadBroker(rack, replica.id(), brokerCapacity);
boolean isLeader;
if (partitionInfo.leader() == null) {
    LOG.warn("Detected offline partition {}-{}, skipping", partitionInfo.topic(), partitionInfo.partition());
    continue;
} else {
    isLeader = replica.id() == partitionInfo.leader().id();
public Node getLeader(String topic, int partitionId) {
    // Use try-with-resources so the consumer is closed even if the partition lookup fails.
    try (KafkaConsumer consumer = createNewConsumer(DEFAULTCP)) {
        List<PartitionInfo> tmList = consumer.partitionsFor(topic);
        PartitionInfo partitionInfo = tmList.stream()
                .filter(pi -> pi.partition() == partitionId)
                .findFirst()
                .get();
        return partitionInfo.leader();
    }
}
public OutOfSyncReplica(PartitionInfo partitionInfo) {
    this.topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
    this.inSyncBrokers = getInSyncReplicas(partitionInfo);
    this.outOfSyncBrokers = getOutOfSyncReplicas(partitionInfo);
    this.leader = partitionInfo.leader();
}
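// Hedged sketch of the two helpers referenced above; their actual implementations are not
// shown in this listing. Assuming broker ids (Integer) as the element type, they could
// follow the same set-difference idiom used in getJsonPartitions()/writeKafkaClusterState():
private static Set<Integer> getInSyncReplicas(PartitionInfo partitionInfo) {
    return Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
}

private static Set<Integer> getOutOfSyncReplicas(PartitionInfo partitionInfo) {
    Set<Integer> outOfSync = Arrays.stream(partitionInfo.replicas()).map(Node::id).collect(Collectors.toSet());
    outOfSync.removeAll(getInSyncReplicas(partitionInfo));
    return outOfSync;
}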
public void restartLeader(TopicPartition partition) {
    failedNodes.remove(cluster.partition(partition).leader());
}
private long getOffsets(PartitionInfo partitionInfo, long time) {
    return getOffsets(
            partitionInfo.leader(),
            partitionInfo.topic(),
            partitionInfo.partition(),
            time);
}
public boolean leaderAvailable(TopicPartition partition) {
    return logs.containsKey(partition) && nodeAvailable(cluster.partition(partition).leader());
}