Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; if (!prev.topics().equals(curr.topics())) { return true; for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true;
/**
 * Chooses a partition for the record by delegating to {@code next(...)} with the
 * count of partitions that currently have an available leader.
 *
 * @param topic      the topic the record is headed for
 * @param key        record key (unused by this partitioner)
 * @param keyBytes   serialized record key (unused)
 * @param value      record value (unused)
 * @param valueBytes serialized record value (unused)
 * @param cluster    the current cluster metadata
 * @return the partition index produced by {@code next(...)}
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    final int availableCount = cluster.availablePartitionsForTopic(topic).size();
    return this.next(availableCount);
}
/**
 * Drops the cached controller node from the current cluster snapshot, if one is set.
 * Rebuilds the cluster with the same id and nodes but no controller and empty
 * unauthorized/invalid/internal topic sets.
 */
public void clearController() {
    // Nothing to clear when no controller is cached.
    if (cluster.controller() == null)
        return;
    log.trace("Clearing cached controller node {}.", cluster.controller());
    this.cluster = new Cluster(
            cluster.clusterResource().clusterId(),
            cluster.nodes(),
            Collections.emptySet(),
            Collections.emptySet(),
            Collections.emptySet(),
            null);
}
/**
 * Counts the partitions across every topic known to the given cluster metadata.
 *
 * @param cluster the cluster to inspect
 * @return the sum of the partition counts of all topics in {@code cluster}
 */
public static int totalNumPartitions(Cluster cluster) {
    return cluster.topics().stream()
            .mapToInt(cluster::partitionCountForTopic)
            .sum();
}
public void updatePatternSubscription(Cluster cluster) { final Set<String> topicsToSubscribe = new HashSet<>(); for (String topic : cluster.topics()) if (subscriptions.subscribedPattern().matcher(topic).matches() && !(excludeInternalTopics && cluster.internalTopics().contains(topic))) topicsToSubscribe.add(topic); subscriptions.subscribeFromPattern(topicsToSubscribe); // note we still need to update the topics contained in the metadata. Although we have // specified that all topics should be fetched, only those set explicitly will be retained metadata.setTopics(subscriptions.groupSubscription()); }
// Verify the cluster snapshot built from metadata.
// NOTE(review): JUnit's assertEquals signature is (expected, actual); these calls pass
// the actual value first, which yields misleading failure messages — consider swapping.
assertEquals(cluster.clusterResource().clusterId(), "dummy");
assertEquals(cluster.nodes().size(), 4);
assertEquals(cluster.invalidTopics(), Collections.singleton("topic3"));
assertEquals(cluster.unauthorizedTopics(), Collections.singleton("topic4"));
assertEquals(cluster.topics().size(), 3);
assertEquals(cluster.internalTopics(), Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME));
assertEquals(cluster.partitionsForTopic("topic1").size(), 2);
assertEquals(cluster.partitionsForTopic("topic2").size(), 3);
// Bootstrapping from the same address must produce clusters equal to the
// metadata-derived ones, for both the populated and the empty case.
Cluster fromCluster = Cluster.bootstrap(Collections.singletonList(address));
assertEquals(fromMetadata, fromCluster);
Cluster fromClusterEmpty = Cluster.empty();
assertEquals(fromMetadataEmpty, fromClusterEmpty);
/**
 * Collects the ids of all brokers that host at least one partition replica
 * in the given cluster.
 *
 * @param kafkaCluster cluster metadata to scan
 * @return the set of broker ids appearing in any replica list
 */
private Set<Integer> brokersWithPartitions(Cluster kafkaCluster) {
    Set<Integer> brokerIds = new HashSet<>();
    for (String topicName : kafkaCluster.topics())
        for (PartitionInfo partitionInfo : kafkaCluster.partitionsForTopic(topicName))
            for (Node replica : partitionInfo.replicas())
                brokerIds.add(replica.id());
    return brokerIds;
}
/** * Compute the partition for the given record. * * @param topic The topic name * @param key The key to partition on (or null if no key) * @param keyBytes serialized key to partition on (or null if no key) * @param value The value to partition on or null * @param valueBytes serialized value to partition on or null * @param cluster The current cluster metadata */ public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { // no partitions are available, give a non-available partition return Utils.toPositive(nextValue) % numPartitions; } } else { // hash the keyBytes to choose a partition return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } }
/** Returns the partition metadata for {@code topic} from the cached cluster view. */
public List<PartitionInfo> partitionsFor(String topic) {
    final Cluster snapshot = this.cluster;
    return snapshot.partitionsForTopic(topic);
}
continue; if (!cluster.topics().contains(topicName)) { future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found.")); continue; boolean isInternal = cluster.internalTopics().contains(topicName); List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topicName); List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size()); for (PartitionInfo partitionInfo : partitionInfos) {
// Extract the cluster view carried by the metadata response.
Cluster cluster = response.cluster();
// Fail fast if any requested topic was unauthorized — don't return partial results.
Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
if (!unauthorizedTopics.isEmpty())
    throw new TopicAuthorizationException(unauthorizedTopics);
// Collect the partition info for every topic the cluster reports.
for (String topic : cluster.topics())
    topicsPartitionInfos.put(topic, cluster.partitionsForTopic(topic));
return topicsPartitionInfos;
/**
 * Snapshots the per-topic partition counts for every topic in the group
 * subscription, taken from the given cluster metadata.
 */
private MetadataSnapshot(SubscriptionState subscription, Cluster cluster) {
    Map<String, Integer> countsByTopic = new HashMap<>();
    for (String subscribedTopic : subscription.groupSubscription()) {
        countsByTopic.put(subscribedTopic, cluster.partitionCountForTopic(subscribedTopic));
    }
    this.partitionsPerTopic = countsByTopic;
}
/**
 * Rebuilds the cached {@link Cluster} instance from the per-partition metadata map
 * and the current cluster-level fields (nodes, topic sets, controller).
 */
private void computeClusterView() {
    // Flatten the metadata map into a plain list of partition infos.
    List<PartitionInfo> partitionSnapshot = metadataByPartition.values()
            .stream()
            .map(entry -> entry.partitionInfo())
            .collect(Collectors.toList());
    this.clusterInstance = new Cluster(clusterId, nodes, partitionSnapshot,
            unauthorizedTopics, invalidTopics, internalTopics, controller);
}
/** Exposes the node list of the cached cluster snapshot. */
@Override
public List<Node> fetchNodes() {
    return this.cluster.nodes();
}
/**
 * Create a new mock producer with invented metadata, the given autoComplete setting,
 * partitioner, and key/value serializers.
 *
 * <p>Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)
 * MockProducer(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer)}.
 */
public MockProducer(final boolean autoComplete,
                    final Partitioner partitioner,
                    final Serializer<K> keySerializer,
                    final Serializer<V> valueSerializer) {
    this(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer);
}
/**
 * Builds a bootstrap {@code MetadataCache} whose nodes are synthesized from the given
 * socket addresses, assigned decreasing negative ids (-1, -2, ...) so they cannot
 * collide with real broker ids.
 */
static MetadataCache bootstrap(List<InetSocketAddress> addresses) {
    List<Node> bootstrapNodes = new ArrayList<>(addresses.size());
    int syntheticId = -1;
    for (InetSocketAddress address : addresses) {
        bootstrapNodes.add(new Node(syntheticId, address.getHostString(), address.getPort()));
        syntheticId--;
    }
    return new MetadataCache(null, bootstrapNodes, Collections.emptyList(),
            Collections.emptySet(), Collections.emptySet(), Collections.emptySet(),
            null, Cluster.bootstrap(addresses));
}
// Bootstrap cluster pointing at a stub address.
// NOTE(review): bootstrapCluster appears unused below in this view — verify it is
// consumed later (e.g. passed to the test environment) or remove it.
Cluster bootstrapCluster = Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 9999)));
Cluster initializedCluster = mockCluster(0);
// First metadata response: cluster discovery, no topic metadata yet.
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(),
        initializedCluster.clusterResource().clusterId(),
        initializedCluster.controller().id(),
        Collections.emptyList()));
Node leader = initializedCluster.nodes().get(0);
// Partition metadata placing the same node as leader, replica, and ISR member,
// with leader epoch 10.
MetadataResponse.PartitionMetadata partitionMetadata = new MetadataResponse.PartitionMetadata(
        Errors.NONE, 0, leader, Optional.of(10), singletonList(leader),
        singletonList(leader), singletonList(leader));
// Second metadata response: topic-level metadata for the topic under test.
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(),
        initializedCluster.clusterResource().clusterId(), 1,
        singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false,
                singletonList(partitionMetadata)))));
/**
 * Checks whether the broker with the given id appears in the in-sync replica set
 * of the given topic partition.
 *
 * @param leader  broker id to look for (unboxed per comparison; kept inside the
 *                lambda so an empty ISR still returns false even if {@code leader} is null)
 * @param cluster cluster metadata to consult
 * @param tp      the topic partition whose ISR is inspected
 */
private boolean isInIsr(Integer leader, Cluster cluster, TopicPartition tp) {
    return Arrays.stream(cluster.partition(tp).inSyncReplicas())
            .anyMatch(replica -> replica.id() == leader);
}
@Test
public void testUnreachableBootstrapServer() throws Exception {
    // This tests the scenario in which the bootstrap server is unreachable for a short while,
    // which prevents AdminClient from being able to send the initial metadata request
    Cluster cluster = Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 8121)));
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Make the lone bootstrap node unreachable for 200 ms; the client must retry.
        env.kafkaClient().setUnreachable(cluster.nodes().get(0), 200);
        // Once reachable, the metadata request reveals the real cluster.
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
                new MetadataResponse(discoveredCluster.nodes(),
                        discoveredCluster.clusterResource().clusterId(), 1,
                        Collections.emptyList()));
        // The subsequent create-topics request succeeds.
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
                new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        KafkaFuture<Void> future = env.adminClient().createTopics(
                Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        // Should complete despite the initial unreachability window.
        future.get();
    }
}
/**
 * Bootstrapping a cluster from IP and hostname addresses must preserve both
 * host strings on the resulting nodes.
 */
@Test
public void testBootstrap() {
    String ipAddress = "140.211.11.105";
    String hostName = "www.example.com";
    Cluster cluster = Cluster.bootstrap(Arrays.asList(
            new InetSocketAddress(ipAddress, 9002),
            new InetSocketAddress(hostName, 9002)));
    Set<String> expectedHosts = Utils.mkSet(ipAddress, hostName);
    Set<String> actualHosts = new HashSet<>();
    cluster.nodes().forEach(node -> actualHosts.add(node.host()));
    assertEquals(expectedHosts, actualHosts);
}