/**
 * Checks whether the given broker id belongs to the in-sync replica set of the partition.
 *
 * @param leader broker id to look for (unboxed for comparison; must not be null).
 * @param cluster current cluster metadata.
 * @param tp topic partition whose ISR is inspected.
 * @return true if a node with the given id is in the ISR.
 */
private boolean isInIsr(Integer leader, Cluster cluster, TopicPartition tp) {
    PartitionInfo partition = cluster.partition(tp);
    return Arrays.stream(partition.inSyncReplicas())
        .mapToInt(n -> n.id())
        .anyMatch(id -> id == leader);
}
/**
 * Check if the partition is currently under replicated.
 *
 * @param cluster The current cluster state.
 * @param tp The topic partition to check.
 * @return True if the partition is currently under replicated.
 */
public static boolean isPartitionUnderReplicated(Cluster cluster, TopicPartition tp) {
    PartitionInfo info = cluster.partition(tp);
    // Under replicated == fewer in-sync replicas than assigned replicas.
    int inSyncCount = info.inSyncReplicas().length;
    int assignedCount = info.replicas().length;
    return inSyncCount != assignedCount;
}
}
/**
 * Builds a JSON-ready list of partition records, one map per partition, with topic,
 * partition id, leader id (-1 when leaderless), and the replica/ISR/OSR id sets.
 */
private List<Object> getJsonPartitions(Set<PartitionInfo> partitions) {
    List<Object> result = new ArrayList<>(partitions.size());
    for (PartitionInfo info : partitions) {
        Set<Integer> allReplicas =
            Arrays.stream(info.replicas()).map(Node::id).collect(Collectors.toSet());
        Set<Integer> isr =
            Arrays.stream(info.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
        // Out-of-sync replicas = assigned replicas minus the in-sync set.
        Set<Integer> osr = new HashSet<>(allReplicas);
        osr.removeAll(isr);
        Node leader = info.leader();
        Map<String, Object> record = new HashMap<>();
        record.put(TOPIC, info.topic());
        record.put(PARTITION, info.partition());
        record.put(LEADER, leader == null ? -1 : leader.id());
        record.put(REPLICAS, allReplicas);
        record.put(IN_SYNC, isr);
        record.put(OUT_OF_SYNC, osr);
        result.add(record);
    }
    return result;
}
/**
 * Appends one formatted row per partition to {@code sb}: topic, partition id,
 * leader id (-1 when leaderless), replica set, ISR, and out-of-sync set.
 */
private void writeKafkaClusterState(StringBuilder sb, SortedSet<PartitionInfo> partitions, int topicNameLength) {
    // The row format only depends on the topic column width, so build it once.
    String rowFormat = "%" + topicNameLength + "s%10s%10s%40s%40s%30s%n";
    for (PartitionInfo info : partitions) {
        Set<String> allReplicas =
            Arrays.stream(info.replicas()).map(Node::idString).collect(Collectors.toSet());
        Set<String> isr =
            Arrays.stream(info.inSyncReplicas()).map(Node::idString).collect(Collectors.toSet());
        // Out-of-sync replicas = assigned replicas minus the in-sync set.
        Set<String> osr = new HashSet<>(allReplicas);
        osr.removeAll(isr);
        Node leader = info.leader();
        sb.append(String.format(rowFormat,
                                info.topic(),
                                info.partition(),
                                leader == null ? -1 : leader.id(),
                                allReplicas,
                                isr,
                                osr));
    }
}
/** * Gather the Kafka partition state within the given under replicated, offline, and other partitions (if verbose). * * @param underReplicatedPartitions state of under replicated partitions. * @param offlinePartitions state of offline partitions. * @param otherPartitions state of partitions other than offline or urp. * @param verbose true if requested to gather state of partitions other than offline or urp. */ private void populateKafkaPartitionState(Set<PartitionInfo> underReplicatedPartitions, Set<PartitionInfo> offlinePartitions, Set<PartitionInfo> otherPartitions, boolean verbose) { for (String topic : _kafkaCluster.topics()) { for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) { boolean isURP = partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length; if (isURP || verbose) { boolean isOffline = partitionInfo.inSyncReplicas().length == 0; if (isOffline) { offlinePartitions.add(partitionInfo); } else if (isURP) { underReplicatedPartitions.add(partitionInfo); } else { // verbose -- other otherPartitions.add(partitionInfo); } } } } }
// Convert the client-side PartitionInfo into an admin-style TopicPartitionInfo:
// partition id, leader resolved via the local leader(...) helper, the full
// assigned replica list, and the current in-sync replica list.
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(
    partitionInfo.partition(),
    leader(partitionInfo),
    Arrays.asList(partitionInfo.replicas()),
    Arrays.asList(partitionInfo.inSyncReplicas()));
partitions.add(topicPartitionInfo);
// NOTE(review): the first statement's result is discarded — its assignment
// target appears to be missing from this fragment. Presumably it should read
// "Set<Integer> replicas = Arrays.stream(...)" since `replicas` is used below;
// confirm against the full file.
Arrays.stream(partitionInfo.replicas()).map(Node::id).collect(Collectors.toSet());
Set<Integer> inSyncReplicas = Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
// Out-of-sync replicas = assigned replicas minus the in-sync set.
Set<Integer> outOfSyncReplicas = new HashSet<>(replicas);
outOfSyncReplicas.removeAll(inSyncReplicas);
// Metadata update carrying epoch 99, older than the last seen epoch (100):
// the asserts below show the update is ignored — the cached partition state
// (ISR of length 1) and the last-seen leader epoch (100) are unchanged.
new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(99), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 1);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);
// Metadata update carrying epoch 100, equal to the last seen epoch: the
// asserts show this update is applied — the ISR is replaced by the new
// (empty) list while the last-seen epoch stays 100.
new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(100), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 0);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);
// Build a scan spec for this topic-partition; presumably the offsets delimit
// the range [lastCommittedOffset, latestOffset) to read — confirm against
// KafkaPartitionScanSpec's contract.
KafkaPartitionScanSpec partitionScanSpec = new KafkaPartitionScanSpec(topicPartition.topic(), topicPartition.partition(), lastCommittedOffset, latestOffset);
PartitionScanWork work = new PartitionScanWork(new EndpointByteMapImpl(), partitionScanSpec);
// Walk the in-sync replicas and collect their hosts (loop continues past this fragment).
Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
for (Node isr : inSyncReplicas) {
    String host = isr.host();
/**
 * Returns the broker ids of the partition's in-sync replicas.
 *
 * @param partitionInfo partition whose ISR is read.
 * @return set of in-sync broker ids (empty when the ISR is empty).
 */
public Set<Integer> getInSyncReplicas(PartitionInfo partitionInfo) {
    Node[] isr = partitionInfo.inSyncReplicas();
    Set<Integer> ids = new HashSet<>(isr.length);
    for (int i = 0; i < isr.length; i++) {
        ids.add(isr[i].id());
    }
    return ids;
}
/**
 * Get the under replicated nodes from PartitionInfo.
 *
 * @param partitionInfo partition to inspect.
 * @return assigned replica nodes that are not in the ISR; empty when fully in sync.
 */
public static Set<Node> getNotInSyncBrokers(PartitionInfo partitionInfo) {
    Node[] assigned = partitionInfo.replicas();
    Node[] inSync = partitionInfo.inSyncReplicas();
    // Fast path: every assigned replica is in sync.
    if (inSync.length == assigned.length) {
        return new HashSet<>();
    }
    Set<Node> lagging = new HashSet<>(Arrays.asList(assigned));
    lagging.removeAll(Arrays.asList(inSync));
    return lagging;
}
/**
 * Get the under replicated nodes from PartitionInfo.
 *
 * @param partitionInfo partition to inspect.
 * @return assigned replica nodes missing from the ISR; empty when fully in sync.
 */
public static Set<Node> getNotInSyncBrokers(PartitionInfo partitionInfo) {
    Set<Node> result = new HashSet<>();
    // Only compute the difference when the ISR is actually smaller than the
    // assigned replica list; otherwise return the empty set directly.
    if (partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length) {
        result.addAll(Arrays.asList(partitionInfo.replicas()));
        for (Node inSync : partitionInfo.inSyncReplicas()) {
            result.remove(inSync);
        }
    }
    return result;
}
/**
 * Get the under replicated nodes from PartitionInfo.
 *
 * @param partitionInfo partition to inspect.
 * @return broker ids of assigned replicas missing from the ISR; empty when fully in sync.
 */
public static Set<Integer> getOutOfSyncReplicas(PartitionInfo partitionInfo) {
    Node[] assigned = partitionInfo.replicas();
    Node[] inSync = partitionInfo.inSyncReplicas();
    // Fast path: nothing is lagging.
    if (inSync.length == assigned.length) {
        return new HashSet<>();
    }
    Set<Node> outOfSync = new HashSet<>(Arrays.asList(assigned));
    outOfSync.removeAll(Arrays.asList(inSync));
    return outOfSync.stream().map(Node::id).collect(Collectors.toSet());
}
// Count ISR entries across all partitions of this entry's value.
// NOTE(review): the assignment target precedes this fragment; presumably the
// count is assigned to `replicateCount`, tested below — confirm in the full file.
e.getValue()
    .parallelStream()
    .flatMap(pi -> Arrays.stream(pi.inSyncReplicas()))
    .count();
// Zero means no partition in this group has any in-sync replica.
if (replicateCount == 0) {
@Override public KafkaConsumer<K, V> listTopics(Handler<AsyncResult<Map<String,List<PartitionInfo>>>> handler) { this.stream.listTopics(done -> { if (done.succeeded()) { // TODO: use Helper class and stream approach Map<String,List<PartitionInfo>> topics = new HashMap<>(); for (Map.Entry<String,List<org.apache.kafka.common.PartitionInfo>> topicEntry: done.result().entrySet()) { List<PartitionInfo> partitions = new ArrayList<>(); for (org.apache.kafka.common.PartitionInfo kafkaPartitionInfo: topicEntry.getValue()) { PartitionInfo partitionInfo = new PartitionInfo(); partitionInfo .setInSyncReplicas( Stream.of(kafkaPartitionInfo.inSyncReplicas()).map(Helper::from).collect(Collectors.toList())) .setLeader(Helper.from(kafkaPartitionInfo.leader())) .setPartition(kafkaPartitionInfo.partition()) .setReplicas( Stream.of(kafkaPartitionInfo.replicas()).map(Helper::from).collect(Collectors.toList())) .setTopic(kafkaPartitionInfo.topic()); partitions.add(partitionInfo); } topics.put(topicEntry.getKey(), partitions); } handler.handle(Future.succeededFuture(topics)); } else { handler.handle(Future.failedFuture(done.cause())); } }); return this; }
@Override public KafkaProducer<K, V> partitionsFor(String topic, Handler<AsyncResult<List<PartitionInfo>>> handler) { this.stream.partitionsFor(topic, done -> { if (done.succeeded()) { // TODO: use Helper class and stream approach List<PartitionInfo> partitions = new ArrayList<>(); for (org.apache.kafka.common.PartitionInfo kafkaPartitionInfo: done.result()) { PartitionInfo partitionInfo = new PartitionInfo(); partitionInfo .setInSyncReplicas( Stream.of(kafkaPartitionInfo.inSyncReplicas()).map(Helper::from).collect(Collectors.toList())) .setLeader(Helper.from(kafkaPartitionInfo.leader())) .setPartition(kafkaPartitionInfo.partition()) .setReplicas( Stream.of(kafkaPartitionInfo.replicas()).map(Helper::from).collect(Collectors.toList())) .setTopic(kafkaPartitionInfo.topic()); partitions.add(partitionInfo); } handler.handle(Future.succeededFuture(partitions)); } else { handler.handle(Future.failedFuture(done.cause())); } }); return this; }
@Override public KafkaConsumer<K, V> partitionsFor(String topic, Handler<AsyncResult<List<PartitionInfo>>> handler) { this.stream.partitionsFor(topic, done -> { if (done.succeeded()) { // TODO: use Helper class and stream approach List<PartitionInfo> partitions = new ArrayList<>(); for (org.apache.kafka.common.PartitionInfo kafkaPartitionInfo: done.result()) { PartitionInfo partitionInfo = new PartitionInfo(); partitionInfo .setInSyncReplicas( Stream.of(kafkaPartitionInfo.inSyncReplicas()).map(Helper::from).collect(Collectors.toList())) .setLeader(Helper.from(kafkaPartitionInfo.leader())) .setPartition(kafkaPartitionInfo.partition()) .setReplicas( Stream.of(kafkaPartitionInfo.replicas()).map(Helper::from).collect(Collectors.toList())) .setTopic(kafkaPartitionInfo.topic()); partitions.add(partitionInfo); } handler.handle(Future.succeededFuture(partitions)); } else { handler.handle(Future.failedFuture(done.cause())); } }); return this; }
// A partition is collected as under replicated only when its ISR is smaller
// than BOTH the assigned replica list and the topic's configured replication
// factor (looked up from replicationFactors by topic name).
if (info.inSyncReplicas().length < info.replicas().length
    && replicationFactors.get(info.topic()) > info.inSyncReplicas().length) {
    underReplicated.add(info);
partitionData.set(REPLICAS_KEY_NAME, replicas.toArray());
// Serialize the ISR as an array of broker ids under ISR_KEY_NAME.
ArrayList<Integer> isr = new ArrayList<Integer>();
for (Node node: fetchPartitionData.inSyncReplicas())
    isr.add(node.id());
partitionData.set(ISR_KEY_NAME, isr.toArray());
// Record the leader's host and the hosts of all in-sync replicas.
// NOTE(review): tm.leader() is dereferenced without a null check — a
// leaderless partition would throw NPE here; confirm callers guarantee a leader.
topicPartitionInfo.setLeader(tm.leader().host());
topicPartitionInfo.setIsr(
    Arrays.stream(tm.inSyncReplicas())
        .map(node -> node.host())
        .collect(toList()));