/**
 * Verifies that every admin client API surfaces an authentication error when the
 * connection to the bootstrap broker has a pending authentication failure.
 */
@Test
public void testAdminClientApisAuthenticationFailure() throws Exception {
    final Cluster bootstrapCluster = mockBootstrapCluster();
    try (AdminClientUnitTestEnv testEnv = new AdminClientUnitTestEnv(Time.SYSTEM, bootstrapCluster,
            newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "1000"))) {
        testEnv.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Pin an authentication failure on the sole bootstrap node for a full day,
        // so every API call exercised below hits the authentication-error path.
        testEnv.kafkaClient().createPendingAuthenticationError(
            bootstrapCluster.nodes().get(0), TimeUnit.DAYS.toMillis(1));
        callAdminClientApisAndExpectAnAuthenticationError(testEnv);
    }
}
/**
 * Verifies that an admin client can be constructed and closed cleanly with no
 * broker interaction; the implicit close at the end of try-with-resources must
 * not hang or throw.
 */
@Test public void testCloseAdminClient() {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Intentionally empty: only construction and close are under test.
    }
}
/**
 * Builds a test environment backed by a single-node mock cluster, passing any
 * extra alternating config key/value strings through to the environment.
 */
private static AdminClientUnitTestEnv mockClientEnv(String... configVals) {
    Cluster singleNodeCluster = mockCluster(0);
    return new AdminClientUnitTestEnv(singleNodeCluster, configVals);
}
/**
 * Test that we propagate exceptions encountered when fetching metadata.
 */
@Test
public void testPropagatedMetadataFetchException() throws Exception {
    final Cluster cluster = mockCluster(0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster,
            newStrMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8121",
                AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Fail authentication on node 0 so the metadata fetch itself errors out
        // before the prepared CreateTopics response can ever be consumed.
        env.kafkaClient().createPendingAuthenticationError(cluster.nodeById(0), TimeUnit.DAYS.toMillis(1));
        env.kafkaClient().prepareResponse(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        KafkaFuture<Void> topicFuture = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(1000)).all();
        // The metadata-fetch failure must propagate to the caller's future.
        TestUtils.assertFutureError(topicFuture, SaslAuthenticationException.class);
    }
}
@Test public void testCreateAcls() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); // Test a call where we successfully create two ACLs. env.kafkaClient().prepareResponse(new CreateAclsResponse(0, asList(new AclCreationResponse(ApiError.NONE), new AclCreationResponse(ApiError.NONE)))); CreateAclsResult results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); for (KafkaFuture<Void> future : results.values().values()) future.get(); results.all().get(); // Test a call where we fail to create one ACL. env.kafkaClient().prepareResponse(new CreateAclsResponse(0, asList( new AclCreationResponse(new ApiError(Errors.SECURITY_DISABLED, "Security is disabled")), new AclCreationResponse(ApiError.NONE)) )); results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); TestUtils.assertFutureError(results.values().get(ACL1), SecurityDisabledException.class); results.values().get(ACL2).get(); TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); } }
/**
 * Test that the client properly times out when we don't receive any metadata.
 */
@Test
public void testTimeoutWithoutMetadata() throws Exception {
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, mockBootstrapCluster(),
            newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // A CreateTopics response is queued, but no metadata response is ever
        // prepared, so the request cannot be dispatched and must time out.
        env.kafkaClient().prepareResponse(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        KafkaFuture<Void> createFuture = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(1000)).all();
        TestUtils.assertFutureError(createFuture, TimeoutException.class);
    }
}
@Test public void testConnectionFailureOnMetadataUpdate() throws Exception { // This tests the scenario in which we successfully connect to the bootstrap server, but // the server disconnects before sending the full response Cluster cluster = mockBootstrapCluster(); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) { Cluster discoveredCluster = mockCluster(0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, null, true); env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest, new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)).all(); future.get(); } }
/** Builds an {@code AdminClientConfig} from alternating key/value strings. */
private static AdminClientConfig newConfMap(String... vals) {
    return new AdminClientConfig(newStrMap(vals));
}
/**
 * Verifies client-id generation: an empty {@code client.id} config yields a
 * fresh, unique id on every call, while an explicit config value is returned
 * verbatim.
 */
@Test
public void testGenerateClientId() {
    Set<String> ids = new HashSet<>();
    for (int i = 0; i < 10; i++) {
        String id = KafkaAdminClient.generateClientId(
            newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, ""));
        // Set#add returns false on duplicates, so a single call both records
        // the id and asserts uniqueness (the original did contains() + add()).
        assertTrue("Got duplicate id " + id, ids.add(id));
    }
    assertEquals("myCustomId",
        KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "myCustomId")));
}
// NOTE(review): this block appears truncated in the source — the
// try-with-resources that should wrap `newStrMap(...)` (presumably
// constructing an AdminClientUnitTestEnv with `time` and `cluster`) is
// missing, and the method body is cut off before its assertions and closing
// braces. Recover the full test from version control before editing.
@Test public void testCreateTopicsRetryBackoff() throws Exception {
    Cluster cluster = mockCluster(0);
    MockTime time = new MockTime();
    int retryBackoff = 100;
    newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) {
    MockClient mockClient = env.kafkaClient();
@Test public void testDescribeAcls() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); // Test a call where we get back ACL1 and ACL2. env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, asList(ACL1, ACL2))); assertCollectionIs(env.adminClient().describeAcls(FILTER1).values().get(), ACL1, ACL2); // Test a call where we get back no results. env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, Collections.<AclBinding>emptySet())); assertTrue(env.adminClient().describeAcls(FILTER2).values().get().isEmpty()); // Test a call where we get back an error. env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, new ApiError(Errors.SECURITY_DISABLED, "Security is disabled"), Collections.<AclBinding>emptySet())); TestUtils.assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); // Test a call where we supply an invalid filter. TestUtils.assertFutureError(env.adminClient().describeAcls(UNKNOWN_FILTER).values(), InvalidRequestException.class); } }
// NOTE(review): orphaned mid-method fragment — the enclosing test method's
// signature and the try-with-resources opener that should consume this
// `newStrMap(...)` call are missing from the source. Recover the full method
// from version control before editing.
Cluster initializedCluster = mockCluster(0);
newStrMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999",
    AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10000000",
    AdminClientConfig.RETRIES_CONFIG, "0"))) {
// NOTE(review): this test appears truncated — `results` is used without a
// declaration, no DeleteAclsResponse is prepared on the mock client before
// `deleteAcls` is invoked, and the closing braces are missing. Recover the
// full test from version control before editing.
@Test public void testDeleteAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
@Test public void testCreatePartitions() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); Map<String, ApiError> m = new HashMap<>(); m.put("my_topic", ApiError.NONE); m.put("other_topic", ApiError.fromThrowable(new InvalidTopicException("some detailed reason"))); // Test a call where one filter has an error. env.kafkaClient().prepareResponse(new CreatePartitionsResponse(0, m)); Map<String, NewPartitions> counts = new HashMap<>(); counts.put("my_topic", NewPartitions.increaseTo(3)); counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3)))); CreatePartitionsResult results = env.adminClient().createPartitions(counts); Map<String, KafkaFuture<Void>> values = results.values(); KafkaFuture<Void> myTopicResult = values.get("my_topic"); myTopicResult.get(); KafkaFuture<Void> otherTopicResult = values.get("other_topic"); try { otherTopicResult.get(); fail("get() should throw ExecutionException"); } catch (ExecutionException e0) { assertTrue(e0.getCause() instanceof InvalidTopicException); InvalidTopicException e = (InvalidTopicException) e0.getCause(); assertEquals("some detailed reason", e.getMessage()); } } }
@Test public void testUnreachableBootstrapServer() throws Exception { // This tests the scenario in which the bootstrap server is unreachable for a short while, // which prevents AdminClient from being able to send the initial metadata request Cluster cluster = Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 8121))); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) { Cluster discoveredCluster = mockCluster(0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().setUnreachable(cluster.nodes().get(0), 200); env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest, new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest, new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)).all(); future.get(); } }
/**
 * Happy-path createTopics: a single topic with an explicit replica assignment
 * for partition 0 completes successfully.
 */
@Test
public void testCreateTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(request -> request instanceof CreateTopicsRequest,
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        KafkaFuture<Void> createFuture = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        createFuture.get();
    }
}
/**
 * Happy-path describeConfigs: describing broker 0's configuration completes
 * without error.
 */
@Test
public void testDescribeConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
            Collections.singletonMap(broker0,
                new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptySet()))));
        DescribeConfigsResult describeResult =
            env.adminClient().describeConfigs(Collections.singleton(broker0));
        describeResult.all().get();
    }
}
/**
 * Verifies that the empty string and null are rejected as topic names by
 * deleteTopics, describeTopics, and createTopics, with no request sent to the
 * broker in any case (in-flight count stays at zero).
 */
@Test
public void testInvalidTopicNames() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        List<String> badTopicNames = asList("", null);

        Map<String, KafkaFuture<Void>> deleteFutures = env.adminClient().deleteTopics(badTopicNames).values();
        for (String topicName : badTopicNames) {
            TestUtils.assertFutureError(deleteFutures.get(topicName), InvalidTopicException.class);
        }
        assertEquals(0, env.kafkaClient().inFlightRequestCount());

        Map<String, KafkaFuture<TopicDescription>> describeFutures =
            env.adminClient().describeTopics(badTopicNames).values();
        for (String topicName : badTopicNames) {
            TestUtils.assertFutureError(describeFutures.get(topicName), InvalidTopicException.class);
        }
        assertEquals(0, env.kafkaClient().inFlightRequestCount());

        List<NewTopic> newTopics = new ArrayList<>();
        for (String topicName : badTopicNames) {
            newTopics.add(new NewTopic(topicName, 1, (short) 1));
        }
        Map<String, KafkaFuture<Void>> createFutures = env.adminClient().createTopics(newTopics).values();
        for (String topicName : badTopicNames) {
            TestUtils.assertFutureError(createFutures.get(topicName), InvalidTopicException.class);
        }
        assertEquals(0, env.kafkaClient().inFlightRequestCount());
    }
}
/**
 * Exercises deleteTopics for success, deletion-disabled, and unknown-topic
 * broker responses.
 */
@Test
public void testDeleteTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Successful deletion.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.NONE)));
        KafkaFuture<Void> deleteFuture = env.adminClient()
            .deleteTopics(Collections.singletonList("myTopic"), new DeleteTopicsOptions()).all();
        deleteFuture.get();

        // Deletion disabled on the broker.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.TOPIC_DELETION_DISABLED)));
        deleteFuture = env.adminClient()
            .deleteTopics(Collections.singletonList("myTopic"), new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(deleteFuture, TopicDeletionDisabledException.class);

        // Topic unknown to the broker.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.UNKNOWN_TOPIC_OR_PARTITION)));
        deleteFuture = env.adminClient()
            .deleteTopics(Collections.singletonList("myTopic"), new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(deleteFuture, UnknownTopicOrPartitionException.class);
    }
}
/**
 * Verifies that a NOT_CONTROLLER response triggers a metadata refresh and a
 * successful retry against the other node.
 */
@Test
public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Node 0 answers NOT_CONTROLLER; a metadata response follows, then the
        // retried request against node 1 succeeds.
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(
                Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
            env.cluster().nodeById(0));
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
            env.cluster().clusterResource().clusterId(), 1,
            Collections.<MetadataResponse.TopicMetadata>emptyList()));
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(
                Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
            env.cluster().nodeById(1));
        KafkaFuture<Void> createFuture = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        createFuture.get();
    }
}