/**
 * Create an empty cluster instance with no nodes and no topic-partitions.
 */
public static Cluster empty() {
    return new Cluster(null, new ArrayList<>(0), new ArrayList<>(0),
            Collections.emptySet(), Collections.emptySet(), null);
}
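A minimal usage sketch (not from the source): empty() serves as a placeholder snapshot before any metadata has been fetched, and the accessors below are part of the public Cluster API.

Cluster current = Cluster.empty();
// No brokers or topics are known yet; callers typically replace this
// snapshot on the first metadata update.
System.out.println(current.nodes().isEmpty());   // true
System.out.println(current.topics().isEmpty());  // true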
private void computeClusterView() {
    List<PartitionInfo> partitionInfos = metadataByPartition.values()
            .stream()
            .map(PartitionInfoAndEpoch::partitionInfo)
            .collect(Collectors.toList());
    this.clusterInstance = new Cluster(clusterId, nodes, partitionInfos,
            unauthorizedTopics, invalidTopics, internalTopics, controller);
}
/**
 * Return a copy of this cluster combined with `partitions`.
 */
public Cluster withPartitions(Map<TopicPartition, PartitionInfo> partitions) {
    Map<TopicPartition, PartitionInfo> combinedPartitions = new HashMap<>(this.partitionsByTopicPartition);
    combinedPartitions.putAll(partitions);
    return new Cluster(clusterResource.clusterId(), this.nodes, combinedPartitions.values(),
            new HashSet<>(this.unauthorizedTopics), new HashSet<>(this.invalidTopics),
            new HashSet<>(this.internalTopics), this.controller);
}
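A hedged usage sketch, assuming an existing Cluster snapshot named `base`; the topic name, node, and partition below are illustrative only:

// Merge updated metadata for one partition into a copy of `base`;
// the original snapshot is untouched, since Cluster is immutable.
Node leader = new Node(0, "localhost", 9092);
PartitionInfo updated = new PartitionInfo("my-topic", 0, leader,
        new Node[]{leader}, new Node[]{leader});
Cluster merged = base.withPartitions(
        Collections.singletonMap(new TopicPartition("my-topic", 0), updated));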
/** * Create a "bootstrap" cluster using the given list of host/ports * @param addresses The addresses * @return A cluster for these hosts/ports */ public static Cluster bootstrap(List<InetSocketAddress> addresses) { List<Node> nodes = new ArrayList<>(); int nodeId = -1; for (InetSocketAddress address : addresses) nodes.add(new Node(nodeId--, address.getHostString(), address.getPort())); return new Cluster(null, true, nodes, new ArrayList<>(0), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null); }
/**
 * Get a snapshot of the cluster metadata from this response
 * @return the cluster snapshot
 */
public Cluster cluster() {
    Set<String> internalTopics = new HashSet<>();
    List<PartitionInfo> partitions = new ArrayList<>();
    for (TopicMetadata metadata : topicMetadata) {
        if (metadata.error == Errors.NONE) {
            if (metadata.isInternal)
                internalTopics.add(metadata.topic);
            for (PartitionMetadata partitionMetadata : metadata.partitionMetadata) {
                partitions.add(partitionMetaToInfo(metadata.topic, partitionMetadata));
            }
        }
    }
    return new Cluster(this.clusterId, this.brokers, partitions,
            topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
            topicsByError(Errors.INVALID_TOPIC_EXCEPTION),
            internalTopics, this.controller);
}
public void clearController() {
    if (cluster.controller() != null) {
        log.trace("Clearing cached controller node {}.", cluster.controller());
        this.cluster = new Cluster(cluster.clusterResource().clusterId(),
                cluster.nodes(),
                Collections.emptySet(),
                Collections.emptySet(),
                Collections.emptySet(),
                null);
    }
}
private static Cluster mockCluster() {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    nodes.put(1, new Node(1, "localhost", 8122));
    nodes.put(2, new Node(2, "localhost", 8123));
    return new Cluster("mockClusterId", nodes.values(),
            Collections.<PartitionInfo>emptySet(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet(),
            nodes.get(0));
}
public static Cluster clusterWith(final int nodes, final Map<String, Integer> topicPartitionCounts) {
    final Node[] ns = new Node[nodes];
    for (int i = 0; i < nodes; i++)
        ns[i] = new Node(i, "localhost", 1969);
    final List<PartitionInfo> parts = new ArrayList<>();
    for (final Map.Entry<String, Integer> topicPartition : topicPartitionCounts.entrySet()) {
        final String topic = topicPartition.getKey();
        final int partitions = topicPartition.getValue();
        for (int i = 0; i < partitions; i++)
            parts.add(new PartitionInfo(topic, i, ns[i % ns.length], ns, ns));
    }
    return new Cluster("kafka-cluster", asList(ns), parts, Collections.emptySet(), Topic.INTERNAL_TOPICS);
}
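A hedged usage sketch for tests; the topic names and partition counts are illustrative:

// Fabricate a two-node cluster holding four partitions across two topics.
Map<String, Integer> topicCounts = new HashMap<>();
topicCounts.put("topicA", 3);
topicCounts.put("topicB", 1);
Cluster testCluster = clusterWith(2, topicCounts);
// Partition leaders round-robin over the nodes via ns[i % ns.length] above.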
private static Cluster mockCluster(int controllerIndex) {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    nodes.put(1, new Node(1, "localhost", 8122));
    nodes.put(2, new Node(2, "localhost", 8123));
    return new Cluster("mockClusterId", nodes.values(),
            Collections.emptySet(),
            Collections.emptySet(),
            Collections.emptySet(),
            nodes.get(controllerIndex));
}
private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp, boolean isLeaderMove) {
    List<Node> mockProposalReplicas = new ArrayList<>(proposal.oldReplicas().size());
    for (Integer oldId : proposal.oldReplicas()) {
        mockProposalReplicas.add(new Node(oldId, "null", -1));
    }
    Node[] isrArray = new Node[mockProposalReplicas.size()];
    isrArray = mockProposalReplicas.toArray(isrArray);
    Set<PartitionInfo> partitions = new HashSet<>();
    partitions.add(new PartitionInfo(tp.topic(), tp.partition(),
            mockProposalReplicas.get(isLeaderMove ? 1 : 0), isrArray, isrArray));
    return new Cluster(null, mockProposalReplicas, partitions,
            Collections.<String>emptySet(), Collections.<String>emptySet());
}
private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp) {
    List<Node> expectedReplicas = new ArrayList<>(proposal.oldReplicas().size());
    expectedReplicas.add(new Node(0, "null", -1));
    expectedReplicas.add(new Node(2, "null", -1));
    Node[] isrArray = new Node[expectedReplicas.size()];
    isrArray = expectedReplicas.toArray(isrArray);
    Set<PartitionInfo> partitions = new HashSet<>();
    partitions.add(new PartitionInfo(tp.topic(), tp.partition(), expectedReplicas.get(1), isrArray, isrArray));
    return new Cluster(null, expectedReplicas, partitions,
            Collections.emptySet(), Collections.emptySet());
}
private Cluster getCluster(Collection<TopicPartition> partitions) {
    Node node0 = new Node(0, "localhost", 100, "rack0");
    Node node1 = new Node(1, "localhost", 100, "rack1");
    Node[] nodes = {node0, node1};
    Set<Node> allNodes = new HashSet<>(2);
    allNodes.add(node0);
    allNodes.add(node1);
    Set<PartitionInfo> parts = new HashSet<>(partitions.size());
    for (TopicPartition tp : partitions) {
        parts.add(new PartitionInfo(tp.topic(), tp.partition(), node0, nodes, nodes));
    }
    return new Cluster("cluster_id", allNodes, parts, Collections.emptySet(), Collections.emptySet());
}
        new PartitionInfo(topicB, 0, node0, nodes, nodes)
);
Cluster testCluster = new Cluster("clusterId", asList(node0, node1, node2), allPartitions,
        Collections.<String>emptySet(), Collections.<String>emptySet());
@Test
public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    // Don't execute transactionManager.maybeAddPartitionToTransaction(tp0). This should result in an error on drain.
    accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(),
            Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT);
    Node node1 = new Node(0, "localhost", 1111);
    PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null);

    Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
            Collections.emptySet(), Collections.emptySet());
    Set<Node> nodes = new HashSet<>();
    nodes.add(node1);
    Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE,
            time.milliseconds());

    // We shouldn't drain batches which haven't been added to the transaction yet.
    assertTrue(drainedBatches.containsKey(node1.id()));
    assertTrue(drainedBatches.get(node1.id()).isEmpty());
}
@Test
public void testPartitioner() throws Exception {
    PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
    PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
    Cluster cluster = new Cluster(null, new ArrayList<Node>(0), asList(partitionInfo0, partitionInfo1),
            Collections.<String>emptySet(), Collections.<String>emptySet());
    MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(),
            new StringSerializer(), new StringSerializer());
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
    Future<RecordMetadata> metadata = producer.send(record);
    assertEquals("Partition should be correct", 1, metadata.get().partition());
    producer.clear();
    assertEquals("Clear should erase our history", 0, producer.history().size());
    producer.close();
}
new Cluster( "mockClusterId", nodes.values(),
nodes.put(2, node2);
final Cluster cluster = new Cluster(
        "mockClusterId",
        nodes.values(),
PartitionInfo part2 = new PartitionInfo(topic, 1, node2, null, null);
Cluster cluster = new Cluster(null, Arrays.asList(node1, node2), Arrays.asList(part1, part2),
        Collections.emptySet(), Collections.emptySet());
Set<Node> nodes = new HashSet<>();
Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
        Collections.emptySet(), Collections.emptySet());
accumulator.append(tp1, time.milliseconds(), "key".getBytes(),
@Test
public void testDeleteConsumerGroups() throws Exception {
    final HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    final Cluster cluster = new Cluster(
            "mockClusterId",
            nodes.values(),
            Collections.<PartitionInfo>emptyList(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet(),
            nodes.get(0));
    final List<String> groupIds = singletonList("group-0");

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final Map<String, Errors> response = new HashMap<>();
        response.put("group-0", Errors.NONE);
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(response));

        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        final KafkaFuture<Void> results = result.deletedGroups().get("group-0");
        assertNull(results.get());

        // Should throw an error for non-retriable errors
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        final DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("group-0"), GroupAuthorizationException.class);
    }
}