@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    topics.clear();
    Set<TopicPartition> allPartitions = new HashSet<>();
    for (Map.Entry<String, List<PartitionInfo>> entry : consumer.listTopics().entrySet()) {
        if (pattern.matcher(entry.getKey()).matches()) {
            for (PartitionInfo partitionInfo : entry.getValue()) {
                allPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                topics.add(partitionInfo.topic());
            }
        }
    }
    return allPartitions;
}
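A minimal sketch of how a pattern-based discoverer like this might be driven. The bootstrap address, the topic regex, and the `PatternSubscription` class name wrapping the method above are assumptions for illustration:

import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class SubscriptionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Hypothetical: an instance of the class above, built with a topic regex.
            PatternSubscription subscription = new PatternSubscription(Pattern.compile("events-.*"));
            Set<TopicPartition> partitions = subscription.getAllSubscribedPartitions(consumer);
            System.out.println("Matched " + partitions.size() + " partitions");
        }
    }
}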
Map<Integer, List<PartitionInfo>> tmpPartitionsByNode = new HashMap<>();
for (PartitionInfo p : partitions) {
    tmpPartitionsByTopicPartition.put(new TopicPartition(p.topic(), p.partition()), p);
    tmpPartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
    // Only partitions with a live leader count as available and can be grouped by node.
    if (p.leader() != null) {
        tmpAvailablePartitionsByTopic.merge(p.topic(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
        tmpPartitionsByNode.merge(p.leader().id(), Collections.singletonList(p), Utils::concatListsUnmodifiable);
    }
}
private PartitionInfo createPartitionInfo(String topic, int partition) {
    return new PartitionInfo(topic, partition, null, null, null);
}
/**
 * Check if the partition is currently under-replicated.
 * @param cluster The current cluster state.
 * @param tp The topic partition to check.
 * @return True if the partition is currently under-replicated.
 */
public static boolean isPartitionUnderReplicated(Cluster cluster, TopicPartition tp) {
    PartitionInfo partitionInfo = cluster.partition(tp);
    return partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length;
}
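A minimal sketch of applying the same criterion cluster-wide, using only Cluster accessors from the Kafka clients API (topics(), partitionsForTopic()):

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public final class UnderReplicationScan {
    // Collect every partition whose ISR is smaller than its replica list,
    // the same check the helper above applies to a single partition.
    public static List<TopicPartition> underReplicatedPartitions(Cluster cluster) {
        List<TopicPartition> result = new ArrayList<>();
        for (String topic : cluster.topics()) {
            for (PartitionInfo info : cluster.partitionsForTopic(topic)) {
                if (info.inSyncReplicas().length != info.replicas().length) {
                    result.add(new TopicPartition(info.topic(), info.partition()));
                }
            }
        }
        return result;
    }
}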
static int getReplicationFactor(List<PartitionInfo> partitionInfoList) {
    if (partitionInfoList.isEmpty()) {
        throw new RuntimeException("Partition list is empty");
    }
    int replicationFactor = partitionInfoList.get(0).replicas().length;
    for (PartitionInfo partitionInfo : partitionInfoList) {
        if (replicationFactor != partitionInfo.replicas().length) {
            String topic = partitionInfoList.get(0).topic();
            LOG.warn("Partitions of the topic {} have different replication factors", topic);
            return -1;
        }
    }
    return replicationFactor;
}
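As a usage sketch, the helper pairs naturally with consumer.partitionsFor; the topic name and the consumer in scope are assumptions:

// Hypothetical call site: derive the replication factor of topic "orders"
// from consumer metadata; -1 signals that the partitions disagree.
int replicationFactor = getReplicationFactor(consumer.partitionsFor("orders"));
if (replicationFactor == -1) {
    LOG.warn("Cannot determine a single replication factor for topic orders");
}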
List<PartitionInfo> partitionInfos = partitionInfoMap.get(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
    TopicPartition tp = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
    if (partitionInfo.leader().id() == brokerStats.getId()) {
        leaderReplicas.add(tp);
    }
    for (Node node : partitionInfo.replicas()) {
        if (node.id() == brokerStats.getId()) {
            replicas.add(tp);
            topicNames.add(partitionInfo.topic());
        }
    }
    for (Node node : partitionInfo.inSyncReplicas()) {
        if (node.id() == brokerStats.getId()) {
            inSyncReplicas.add(tp);
        }
    }
}
brokerStats.setLeaderReplicas(new ArrayList<>());
for (TopicPartition tp : leaderReplicas) {
    AvroTopicPartition avroTp = new AvroTopicPartition(tp.topic(), tp.partition());
    brokerStats.getLeaderReplicas().add(avroTp);
}
private static List<TopicPartition> fetchTopicPartitions(String topic, KafkaConsumer<byte[], byte[]> consumer) {
    // This blocks for up to REQUEST_TIMEOUT_MS_CONFIG ("request.timeout.ms") and then throws
    // org.apache.kafka.common.errors.TimeoutException if the metadata cannot be fetched.
    // TODO: add retry logic.
    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
    return partitions.stream()
        .map(p -> new TopicPartition(topic, p.partition()))
        .collect(Collectors.toList());
}
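A sketch of a typical call site, assuming the method above is in scope and the topic name is illustrative: fetch the partitions once, then use manual assignment instead of a subscription:

List<TopicPartition> partitions = fetchTopicPartitions("events", consumer); // topic name assumed
consumer.assign(partitions);          // manual assignment, no consumer-group rebalancing
consumer.seekToBeginning(partitions); // start from the earliest available offsets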
TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
long lastCommittedOffset = startOffsetsMap.get(topicPartition);
long latestOffset = endOffsetsMap.get(topicPartition);
logger.debug("Latest offset of {} is {}", topicPartition, latestOffset);
logger.debug("Last committed offset of {} is {}", topicPartition, lastCommittedOffset);
KafkaPartitionScanSpec partitionScanSpec = new KafkaPartitionScanSpec(
    topicPartition.topic(), topicPartition.partition(), lastCommittedOffset, latestOffset);
PartitionScanWork work = new PartitionScanWork(new EndpointByteMapImpl(), partitionScanSpec);
Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
for (Node isr : inSyncReplicas) {
    String host = isr.host();
    // ...
}
private void writeKafkaClusterState(StringBuilder sb, SortedSet<PartitionInfo> partitions, int topicNameLength) {
    for (PartitionInfo partitionInfo : partitions) {
        Set<String> replicas = Arrays.stream(partitionInfo.replicas())
            .map(Node::idString)
            .collect(Collectors.toSet());
        Set<String> inSyncReplicas = Arrays.stream(partitionInfo.inSyncReplicas())
            .map(Node::idString)
            .collect(Collectors.toSet());
        Set<String> outOfSyncReplicas = new HashSet<>(replicas);
        outOfSyncReplicas.removeAll(inSyncReplicas);
        sb.append(String.format("%" + topicNameLength + "s%10s%10s%40s%40s%30s%n",
            partitionInfo.topic(),
            partitionInfo.partition(),
            partitionInfo.leader() == null ? -1 : partitionInfo.leader().id(),
            replicas, inSyncReplicas, outOfSyncReplicas));
    }
}
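Since PartitionInfo does not implement Comparable, the SortedSet parameter above needs an explicit Comparator. A plausible way to build one (the ordering and the topic name are assumptions):

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.kafka.common.PartitionInfo;

// Order by topic, then partition id, and feed the writer above.
SortedSet<PartitionInfo> sorted = new TreeSet<>(
    Comparator.comparing(PartitionInfo::topic)
              .thenComparingInt(PartitionInfo::partition));
sorted.addAll(cluster.partitionsForTopic("payments")); // hypothetical topic
writeKafkaClusterState(sb, sorted, 30);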
private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp) {
    List<Node> expectedReplicas = new ArrayList<>(proposal.oldReplicas().size());
    expectedReplicas.add(new Node(0, "null", -1));
    expectedReplicas.add(new Node(2, "null", -1));
    Node[] isrArray = new Node[expectedReplicas.size()];
    isrArray = expectedReplicas.toArray(isrArray);
    Set<PartitionInfo> partitions = new HashSet<>();
    partitions.add(new PartitionInfo(tp.topic(), tp.partition(), expectedReplicas.get(1), isrArray, isrArray));
    return new Cluster(null, expectedReplicas, partitions, Collections.emptySet(), Collections.emptySet());
}
@Override
public String formatAsText() {
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(consumerProperties)) {
        List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic);
        List<TopicPartition> topicPartitions = partitionsInfo.stream()
            .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
            .collect(Collectors.toList());
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
        Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(topicPartitions);
        return partitionsInfo.stream()
            .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]",
                partitionInfo.toString(),
                startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())),
                endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))))
            .collect(Collectors.joining("\n"));
    } catch (Exception e) {
        return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]",
            topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage());
    }
}
@Test
public void testNoEpoch() {
    metadata.update(emptyMetadataResponse(), 0L);
    MetadataResponse metadataResponse = TestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1),
        (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
            new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.empty(), replicas, isr, offlineReplicas));
    metadata.update(metadataResponse, 10L);

    TopicPartition tp = new TopicPartition("topic-1", 0);

    // No epoch was provided.
    assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent());

    // The partition metadata still works without one.
    assertTrue(metadata.partitionInfoIfCurrent(tp).isPresent());
    assertEquals(metadata.partitionInfoIfCurrent(tp).get().partition(), 0);
    assertEquals(metadata.partitionInfoIfCurrent(tp).get().leader().id(), 0);
}
nodes.put(0, new Node(0, "localhost", 8121));
List<PartitionInfo> partitionInfos = new ArrayList<>();
partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)}));
partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)}));
// Partition 2 deliberately has no leader.
partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)}));
partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)}));
partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)}));
Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos,
    Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));

TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
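The null leader on partition 2 matters: Cluster distinguishes all partitions of a topic from the currently available (leadered) ones. A quick check of that behavior against the fixture above:

// partitionsForTopic returns all five partitions; availablePartitionsForTopic
// filters out partition 2, whose leader is null.
assertEquals(5, cluster.partitionsForTopic("my_topic").size());
assertEquals(4, cluster.availablePartitionsForTopic("my_topic").size());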
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException {
    List<KafkaTopicPartition> partitions = new LinkedList<>();
    try {
        for (String topic : topics) {
            for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
                partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // Rethrow our own wakeup exception.
        throw new WakeupException();
    }
    return partitions;
}
@Override
public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
    return new KafkaPartition.Builder()
        .withId(partitionInfo.partition())
        .withTopicName(partitionInfo.topic())
        .withLeaderId(partitionInfo.leader().id())
        .withLeaderHostAndPort(partitionInfo.leader().host(), partitionInfo.leader().port())
        .build();
}
};
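Note that this mapper dereferences leader() unconditionally, so a leaderless partition would throw a NullPointerException. A hedged, null-safe variant of the same mapping (the builder method names are taken from the snippet above; the fluent builder behavior is assumed):

public KafkaPartition applyNullSafe(PartitionInfo partitionInfo) {
    KafkaPartition.Builder builder = new KafkaPartition.Builder()
        .withId(partitionInfo.partition())
        .withTopicName(partitionInfo.topic());
    Node leader = partitionInfo.leader();
    if (leader != null) { // a partition can be leaderless during elections
        builder = builder.withLeaderId(leader.id())
                         .withLeaderHostAndPort(leader.host(), leader.port());
    }
    return builder.build();
}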
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put("topic-1", 1);
TopicPartition tp = new TopicPartition("topic-1", 0);

// ... (earlier, elided updates establish leader epoch 100) ...

// An update with a stale leader epoch (99 < 100) is rejected, so the ISR of length 1 is retained.
MetadataResponse metadataResponse = TestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts,
    (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
        new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(99), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 1);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

// An update at the current epoch (100) is applied, replacing the ISR with the empty list.
metadataResponse = TestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts,
    (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
        new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(100), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 0);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);
if (info.inSyncReplicas().length < info.replicas().length
        && replicationFactors.get(info.topic()) > info.inSyncReplicas().length) {
    underReplicated.add(info);
}
// ...
noLeaderFlags[info.partition()] = false;
// ...
// `replicaIds` is assumed here; the original source of the stream is elided.
Node[] nodes = replicaIds.stream()
    .map(val -> new Node((Integer) val, "", -1))
    .toArray(Node[]::new);
PartitionInfo partitionInfo = new PartitionInfo(topic, partitionId, null, nodes, new Node[0]);
underReplicated.add(partitionInfo);
Optional<PartitionInfo> currentInfo = metadata.partitionInfoIfCurrent(tp);
if (!currentInfo.isPresent()) {
    metadata.add(tp.topic());
    log.debug("Leader for partition {} is unknown for fetching offset", tp);
    metadata.requestUpdate();
    partitionsToRetry.add(tp);
} else if (currentInfo.get().leader() == null) {
    log.debug("Leader for partition {} is unavailable for fetching offset", tp);
    metadata.requestUpdate();
    partitionsToRetry.add(tp);
} else if (client.isUnavailable(currentInfo.get().leader())) {
    client.maybeThrowAuthFailure(currentInfo.get().leader());
    // The connection has failed; await the reconnect backoff before retrying.
    // The disconnect has already triggered a metadata update, so none is requested here.
    log.debug("Leader {} for partition {} is unavailable for fetching offset until reconnect backoff expires",
        currentInfo.get().leader(), tp);
    partitionsToRetry.add(tp);
} else {
    Node node = currentInfo.get().leader();
    Map<TopicPartition, ListOffsetRequest.PartitionData> topicData =
        timestampsToSearchByNode.computeIfAbsent(node, n -> new HashMap<>());
    // ...
}
for (PartitionInfo partitionInfo : partitionInfos) {
    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(
        partitionInfo.partition(),
        leader(partitionInfo),
        Arrays.asList(partitionInfo.replicas()),
        Arrays.asList(partitionInfo.inSyncReplicas()));
    partitions.add(topicPartitionInfo);
}
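For context, TopicPartitionInfo is the admin-API counterpart of PartitionInfo, and the converted list typically ends up inside a TopicDescription. A sketch of that final step; the topic name and internal flag are assumptions:

import java.util.List;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

// Wrap the converted per-partition entries for the topic.
TopicDescription description = new TopicDescription("my_topic", false, partitions);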