/** Returns the controller node from the cached cluster view (may be null — see clearController). */
public Node controller() { return cluster.controller(); }
/**
 * Drops the cached controller reference. The cluster view is rebuilt with the same
 * cluster id and node list but a null controller; topic/partition-level collections
 * are left empty, matching the original behavior.
 */
public void clearController() {
    Node cachedController = cluster.controller();
    if (cachedController == null) {
        return;
    }
    log.trace("Clearing cached controller node {}.", cachedController);
    this.cluster = new Cluster(
            cluster.clusterResource().clusterId(),
            cluster.nodes(),
            Collections.emptySet(),
            Collections.emptySet(),
            Collections.emptySet(),
            null);
}
// Queue a MetadataResponse built from the current cluster view (nodes, cluster id, controller id, topic metadata `t`),
// then a DeleteRecordsResponse with throttle time 0 and result map `m`.
// NOTE(review): fragment of a larger test method — `t` and `m` are defined outside this view.
env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t)); env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
// Advertise default API versions on the mock client, then queue a successful
// FindCoordinator response that names the cluster controller as coordinator.
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
@Test(expected = InvalidTopicException.class) public void testSubscriptionOnInvalidTopic() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Cluster cluster = metadata.fetch(); PartitionAssignor assignor = new RoundRobinAssignor(); String invalidTopicName = "topic abc"; // Invalid topic name due to space List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList())); MetadataResponse updateResponse = new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), topicMetadata); client.prepareMetadataUpdate(updateResponse); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer)); consumer.poll(Duration.ZERO); } }
// Advertise default API versions on the mock client, then queue a successful
// FindCoordinator response that names the cluster controller as coordinator.
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
// NOTE(review): continuation of a call whose opening lies outside this view —
// presumably the tail of a MetadataResponse construction (controller id + empty topic metadata); confirm against the enclosing statement.
initializedCluster.controller().id(), Collections.emptyList()));
@Test public void testDeleteConsumerGroups() throws Exception { final HashMap<Integer, Node> nodes = new HashMap<>(); nodes.put(0, new Node(0, "localhost", 8121)); final Cluster cluster = new Cluster( "mockClusterId", nodes.values(), Collections.<PartitionInfo>emptyList(), Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0)); final List<String> groupIds = singletonList("group-0"); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); //Retriable FindCoordinatorResponse errors should be retried env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller())); final Map<String, Errors> response = new HashMap<>(); response.put("group-0", Errors.NONE); env.kafkaClient().prepareResponse(new DeleteGroupsResponse(response)); final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); final KafkaFuture<Void> results = result.deletedGroups().get("group-0"); assertNull(results.get()); //should throw error for non-retriable errors env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); final DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds); TestUtils.assertFutureError(errorResult.deletedGroups().get("group-0"), GroupAuthorizationException.class); } }
// NOTE(review): continuation of a call whose opening lies outside this view —
// presumably MetadataResponse(nodes, clusterId, controllerId, emptyTopicMetadata); confirm against the enclosing statement.
env.cluster().nodes(), env.cluster().clusterResource().clusterId(), env.cluster().controller().id(), Collections.emptyList()));