private List<AclBinding> getAclBindings(Map<AclBindingFilter, KafkaFuture<FilterResults>> futures) { List<AclBinding> acls = new ArrayList<>(); for (KafkaFuture<FilterResults> value: futures.values()) { FilterResults results; try { results = value.get(); } catch (Throwable e) { // This should be unreachable, since the future returned by KafkaFuture#allOf should // have failed if any Future failed. throw new KafkaException("DeleteAclsResult#all: internal error", e); } for (FilterResult result : results.values()) { if (result.exception() != null) throw result.exception(); acls.add(result.binding()); } } return acls; } }
/**
 * Deletes the given topic, waiting up to {@code DELETE_TIMEOUT_SECONDS}. If the
 * delete response does not arrive in time, double-checks whether the deletion
 * actually succeeded on the broker before failing.
 *
 * @throws Exception if the topic still exists after the timeout, or on any
 *                   other admin-client error
 */
private void tryDelete(AdminClient adminClient, String topic) throws Exception {
    try {
        adminClient.deleteTopics(Collections.singleton(topic)).all()
            .get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        // Fix: parameterized loggers substitute "{}" placeholders, not printf-style
        // "%d" — the original logged the literal string "%d" instead of the value.
        // (Assumes LOG is an SLF4J-style logger, consistent with the "{}" usage
        // conventions elsewhere in this codebase — TODO confirm.)
        LOG.info("Did not receive delete topic response within {} seconds. Checking if it succeeded",
            DELETE_TIMEOUT_SECONDS);
        if (adminClient.listTopics().names()
                .get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS).contains(topic)) {
            throw new Exception("Topic still exists after timeout");
        }
    }
}
/**
 * Verifies that an AdminClient configured with bad SASL credentials surfaces a
 * SaslAuthenticationException as the cause of the failed describeTopics call.
 */
@Test
public void testAdminClientWithInvalidCredentials() {
    Map<String, Object> config = new HashMap<>(saslClientConfigs);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + server.port());
    try (AdminClient admin = AdminClient.create(config)) {
        admin.describeTopics(Collections.singleton("test")).all().get();
        fail("Expected an authentication error!");
    } catch (Exception e) {
        // The authentication failure arrives wrapped, so inspect the cause.
        assertTrue("Expected SaslAuthenticationException, got " + e.getCause().getClass(),
            e.getCause() instanceof SaslAuthenticationException);
    }
}
/**
 * Creates a topic with a single partition and replication factor 1, blocking
 * for up to 30 seconds until the creation completes.
 *
 * @param topicName name of the topic to create
 * @throws Exception if creation fails or times out
 */
public void createTopic(String topicName) throws Exception {
    NewTopic request = new NewTopic(topicName, 1, (short) 1);
    kafkaAdminClient.createTopics(Collections.singleton(request)).all().get(30, TimeUnit.SECONDS);
}
/**
 * Creates a test topic with the given partition count and replication factor,
 * failing the current test if creation does not succeed.
 *
 * @param topic              name of the topic to create
 * @param numberOfPartitions partition count for the new topic
 * @param replicationFactor  replication factor (narrowed to short for NewTopic)
 * @param properties         currently unused by this implementation
 */
@Override
public void createTestTopic(String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
    LOG.info("Creating topic {}", topic);
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
        adminClient.createTopics(Collections.singleton(topicObj)).all().get();
    } catch (Exception e) {
        // Log via the configured logger (stack trace as the final argument) instead
        // of printStackTrace(), so the failure lands in the test log output
        // consistently with the rest of this class.
        LOG.error("Create test topic : {} failed", topic, e);
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    }
}
/**
 * Looks up the Kafka cluster id via the admin client.
 *
 * @param serviceContext context providing the admin client
 * @return the cluster id reported by the brokers
 * @throws KsqlException if the brokers are too old to support the request, or
 *                       if the lookup fails for any other reason
 */
private static String getKafkaClusterId(final ServiceContext serviceContext) {
    try {
        return serviceContext.getAdminClient().describeCluster().clusterId().get();
    } catch (final UnsupportedVersionException e) {
        // Fix: the original message was garbled ("incompatible with. ") and the
        // cause was dropped; both are corrected here.
        throw new KsqlException(
            "The kafka brokers are incompatible with KSQL. "
                + "KSQL requires broker versions >= 0.10.1.x",
            e
        );
    } catch (final Exception e) {
        throw new KsqlException("Failed to get Kafka cluster information", e);
    }
}
/**
 * Returns the broker id of the leader of partition 0 of the given topic.
 *
 * @param topic topic whose partition-0 leader should be reported
 * @return the leader's broker id
 * @throws Exception on any admin-client failure
 */
@Override
public int getLeaderToShutDown(String topic) throws Exception {
    // try-with-resources: the original leaked the AdminClient by never closing it.
    try (AdminClient client = AdminClient.create(getStandardProperties())) {
        TopicDescription description =
            client.describeTopics(Collections.singleton(topic)).all().get().get(topic);
        return description.partitions().get(0).leader().id();
    }
}
// Verifies createPartitions() when a single request mixes success and failure:
// "my_topic" succeeds while "other_topic" fails with INVALID_TOPIC, and the
// per-topic futures must reflect exactly that.
@Test public void testCreatePartitions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Canned broker response: one OK entry, one error entry.
        Map<String, ApiError> m = new HashMap<>();
        m.put("my_topic", ApiError.NONE);
        m.put("other_topic", ApiError.fromThrowable(new InvalidTopicException("some detailed reason")));
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(new CreatePartitionsResponse(0, m));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3))));
        CreatePartitionsResult results = env.adminClient().createPartitions(counts);
        Map<String, KafkaFuture<Void>> values = results.values();
        // The successful topic's future completes normally.
        KafkaFuture<Void> myTopicResult = values.get("my_topic");
        myTopicResult.get();
        // The failed topic's future surfaces the broker error as the
        // ExecutionException cause, with the broker-supplied message intact.
        KafkaFuture<Void> otherTopicResult = values.get("other_topic");
        try {
            otherTopicResult.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof InvalidTopicException);
            InvalidTopicException e = (InvalidTopicException) e0.getCause();
            assertEquals("some detailed reason", e.getMessage());
        }
    }
}
/**
 * Happy-path createTopics(): a CreateTopicsRequest answered with NONE must
 * complete the aggregate future without error.
 */
@Test
public void testCreateTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            body -> body instanceof CreateTopicsRequest,
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        NewTopic newTopic = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        CreateTopicsOptions options = new CreateTopicsOptions().timeoutMs(10000);
        // get() throws if the creation did not succeed.
        env.adminClient().createTopics(Collections.singleton(newTopic), options).all().get();
    }
}
/**
 * Happy-path describeConfigs() for a single broker resource: a response with
 * no error must complete the aggregate future cleanly.
 */
@Test
public void testDescribeConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
            Collections.singletonMap(broker0,
                new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptySet()))));
        DescribeConfigsResult result = env.adminClient().describeConfigs(Collections.singleton(broker0));
        result.all().get();
    }
}
/**
 * Exercises the KafkaFutureImpl completion contract: single completion,
 * completedFuture(), and exceptional completion surfacing the cause via get().
 */
@Test
public void testCompleteFutures() throws Exception {
    // A future can be completed exactly once; later completions are ignored.
    KafkaFutureImpl<Integer> first = new KafkaFutureImpl<>();
    assertTrue(first.complete(123));
    assertEquals(Integer.valueOf(123), first.get());
    assertFalse(first.complete(456));
    assertTrue(first.isDone());
    assertFalse(first.isCancelled());
    assertFalse(first.isCompletedExceptionally());

    // completedFuture() yields an already-finished future holding the value.
    KafkaFuture<Integer> second = KafkaFuture.completedFuture(456);
    assertEquals(Integer.valueOf(456), second.get());

    // An exceptionally-completed future rethrows the cause from get(),
    // wrapped in an ExecutionException.
    KafkaFutureImpl<Integer> failed = new KafkaFutureImpl<>();
    failed.completeExceptionally(new RuntimeException("We require more vespene gas"));
    try {
        failed.get();
        Assert.fail("Expected an exception");
    } catch (ExecutionException e) {
        assertEquals(RuntimeException.class, e.getCause().getClass());
        Assert.assertEquals("We require more vespene gas", e.getCause().getMessage());
    }
}
// Verifies createAcls() in both the all-success and partial-failure cases:
// per-ACL futures and the aggregate all() future must each report correctly.
@Test public void testCreateAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Test a call where we successfully create two ACLs.
        env.kafkaClient().prepareResponse(new CreateAclsResponse(0,
            asList(new AclCreationResponse(ApiError.NONE), new AclCreationResponse(ApiError.NONE))));
        CreateAclsResult results = env.adminClient().createAcls(asList(ACL1, ACL2));
        assertCollectionIs(results.values().keySet(), ACL1, ACL2);
        // Every per-ACL future and the aggregate future must complete cleanly.
        for (KafkaFuture<Void> future : results.values().values())
            future.get();
        results.all().get();
        // Test a call where we fail to create one ACL.
        // Note: responses are positional — the first AclCreationResponse pairs
        // with ACL1, the second with ACL2.
        env.kafkaClient().prepareResponse(new CreateAclsResponse(0, asList(
            new AclCreationResponse(new ApiError(Errors.SECURITY_DISABLED, "Security is disabled")),
            new AclCreationResponse(ApiError.NONE))
        ));
        results = env.adminClient().createAcls(asList(ACL1, ACL2));
        assertCollectionIs(results.values().keySet(), ACL1, ACL2);
        // ACL1 fails, ACL2 succeeds, and the aggregate future reflects the failure.
        TestUtils.assertFutureError(results.values().get(ACL1), SecurityDisabledException.class);
        results.values().get(ACL2).get();
        TestUtils.assertFutureError(results.all(), SecurityDisabledException.class);
    }
}
// NOTE(review): partial fragment — `result`, `leader`, and `topic` come from the
// (not visible) enclosing method. Asserts that the described topic's partition 0
// reports the expected leader node.
Map<String, TopicDescription> topicDescriptions = result.all().get();
assertEquals(leader, topicDescriptions.get(topic).partitions().get(0).leader());
@Test public void testUnreachableBootstrapServer() throws Exception {
    // This tests the scenario in which the bootstrap server is unreachable for a short while,
    // which prevents AdminClient from being able to send the initial metadata request
    Cluster cluster = Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 8121)));
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Mark the bootstrap node unreachable for 200 ms; the client must retry
        // the metadata request after that window and then proceed normally.
        env.kafkaClient().setUnreachable(cluster.nodes().get(0), 200);
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
            new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(),
                1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        // The createTopics call must succeed despite the initial unreachability.
        KafkaFuture<Void> future = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
/**
 * allOf() with no arguments must yield an already-successful future.
 */
@Test
public void testAllOfFuturesHandlesZeroFutures() throws Exception {
    KafkaFuture<Void> combined = KafkaFuture.allOf();
    assertTrue(combined.isDone());
    assertFalse(combined.isCancelled());
    assertFalse(combined.isCompletedExceptionally());
    // get() must return immediately without throwing.
    combined.get();
}
// Verifies deleteConsumerGroups(): retriable FindCoordinator errors are retried
// until a coordinator is found, while non-retriable errors fail the group future.
@Test public void testDeleteConsumerGroups() throws Exception {
    final HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    final Cluster cluster = new Cluster(
        "mockClusterId",
        nodes.values(),
        Collections.<PartitionInfo>emptyList(),
        Collections.<String>emptySet(),
        Collections.<String>emptySet(),
        nodes.get(0));
    final List<String> groupIds = singletonList("group-0");
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        //Retriable FindCoordinatorResponse errors should be retried
        // Two transient coordinator errors, then success — the prepared
        // responses are consumed in this exact order.
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        final Map<String, Errors> response = new HashMap<>();
        response.put("group-0", Errors.NONE);
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(response));
        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);
        // A successful deletion completes the per-group future with null.
        final KafkaFuture<Void> results = result.deletedGroups().get("group-0");
        assertNull(results.get());
        //should throw error for non-retriable errors
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        final DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("group-0"), GroupAuthorizationException.class);
    }
}
/**
 * Verifies deleteTopics(): a NONE response succeeds, while
 * TOPIC_DELETION_DISABLED and UNKNOWN_TOPIC_OR_PARTITION map to their
 * corresponding exception types on the aggregate future.
 */
@Test
public void testDeleteTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Successful deletion completes the future normally.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.NONE)));
        env.adminClient().deleteTopics(Collections.singletonList("myTopic"),
            new DeleteTopicsOptions()).all().get();

        // TOPIC_DELETION_DISABLED maps to TopicDeletionDisabledException.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.TOPIC_DELETION_DISABLED)));
        KafkaFuture<Void> disabledFuture = env.adminClient().deleteTopics(
            Collections.singletonList("myTopic"), new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(disabledFuture, TopicDeletionDisabledException.class);

        // UNKNOWN_TOPIC_OR_PARTITION maps to UnknownTopicOrPartitionException.
        env.kafkaClient().prepareResponse(body -> body instanceof DeleteTopicsRequest,
            new DeleteTopicsResponse(Collections.singletonMap("myTopic", Errors.UNKNOWN_TOPIC_OR_PARTITION)));
        KafkaFuture<Void> unknownFuture = env.adminClient().deleteTopics(
            Collections.singletonList("myTopic"), new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(unknownFuture, UnknownTopicOrPartitionException.class);
    }
}
@Test public void testConnectionFailureOnMetadataUpdate() throws Exception {
    // This tests the scenario in which we successfully connect to the bootstrap server, but
    // the server disconnects before sending the full response
    Cluster cluster = mockBootstrapCluster();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First metadata attempt: null response + disconnect=true simulates the
        // broker dropping the connection mid-response.
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, null, true);
        // Second attempt succeeds — the client must transparently retry.
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest,
            new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(),
                1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        // The createTopics call must succeed despite the initial disconnect.
        KafkaFuture<Void> future = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
// Verifies describeAcls() across four cases: results returned, empty results,
// a broker-side error, and a client-side invalid filter.
@Test public void testDescribeAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Test a call where we get back ACL1 and ACL2.
        env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE,
            asList(ACL1, ACL2)));
        assertCollectionIs(env.adminClient().describeAcls(FILTER1).values().get(), ACL1, ACL2);
        // Test a call where we get back no results.
        env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE,
            Collections.<AclBinding>emptySet()));
        assertTrue(env.adminClient().describeAcls(FILTER2).values().get().isEmpty());
        // Test a call where we get back an error.
        env.kafkaClient().prepareResponse(new DescribeAclsResponse(0,
            new ApiError(Errors.SECURITY_DISABLED, "Security is disabled"), Collections.<AclBinding>emptySet()));
        TestUtils.assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class);
        // Test a call where we supply an invalid filter.
        // Note: no prepared response here — the client rejects the filter locally
        // before any request is sent.
        TestUtils.assertFutureError(env.adminClient().describeAcls(UNKNOWN_FILTER).values(),
            InvalidRequestException.class);
    }
}
// Verifies that createTopics() handles NOT_CONTROLLER by refreshing metadata
// and retrying the request against the newly discovered controller.
@Test public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Node 0 answers NOT_CONTROLLER, forcing a controller re-discovery.
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
            env.cluster().nodeById(0));
        // The metadata refresh reports controller id 1 (last argument).
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
            env.cluster().clusterResource().clusterId(),
            1,
            Collections.<MetadataResponse.TopicMetadata>emptyList()));
        // The retried request against node 1 succeeds.
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
            env.cluster().nodeById(1));
        KafkaFuture<Void> future = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        // get() succeeds only if the retry path completed cleanly.
        future.get();
    }
}