/**
 * Looks up the configuration of one (arbitrary) broker in the cluster, used as the
 * source of cluster-wide default settings.
 *
 * @param admin AdminClient already connected to the cluster
 * @return the broker's {@link Config}
 * @throws ConnectException if no brokers or no configs are returned
 * @throws Exception on timeout or failure of the underlying admin requests
 */
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    final Collection<Node> nodes = admin.describeCluster().nodes()
            .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (nodes.isEmpty()) {
        throw new ConnectException("No brokers available to obtain default settings");
    }
    // Any broker will do; defaults are expected to be uniform across the cluster.
    final String brokerId = nodes.iterator().next().idString();
    final ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId);
    final Map<ConfigResource, Config> brokerConfigs = admin
            .describeConfigs(Collections.singleton(brokerResource))
            .all()
            .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (brokerConfigs.isEmpty()) {
        throw new ConnectException("No configs have been received");
    }
    return brokerConfigs.values().iterator().next();
}
}
/**
 * Mock implementation of {@code describeConfigs}: answers immediately from the
 * in-memory {@code allTopics} map. Only TOPIC resources are supported.
 */
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();
    for (final ConfigResource resource : resources) {
        if (resource.type() != ConfigResource.Type.TOPIC) {
            throw new UnsupportedOperationException("Not implemented yet");
        }
        // NOTE(review): throws NPE if the topic is unknown to allTopics — matches
        // the pre-existing behavior of this mock.
        final List<ConfigEntry> entries = new ArrayList<>();
        for (final Map.Entry<String, String> cfg : allTopics.get(resource.name()).configs.entrySet()) {
            entries.add(new ConfigEntry(cfg.getKey(), cfg.getValue()));
        }
        final KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
        future.complete(new Config(entries));
        configDescriptions.put(resource, future);
    }
    return new DescribeConfigsResult(configDescriptions);
}
new ConfigResource(ConfigResource.Type.TOPIC, "foo"))); time.sleep(5000); result2.values().get(new ConfigResource(ConfigResource.Type.TOPIC, "foo")).get();
/**
 * Get a topic config via the Kafka AdminClient API, calling the given handler
 * (in a different thread) with the result.
 */
@Override
public void topicMetadata(TopicName topicName, Handler<AsyncResult<TopicMetadata>> handler) {
    LOGGER.debug("Getting metadata for topic {}", topicName);
    final String name = topicName.toString();
    final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, name);
    // Kick off both async lookups; MetadataWork joins them off-thread.
    final KafkaFuture<TopicDescription> descriptionFuture =
            adminClient.describeTopics(Collections.singleton(name)).values().get(name);
    final KafkaFuture<Config> configFuture =
            adminClient.describeConfigs(Collections.singleton(resource)).values().get(resource);
    queueWork(new MetadataWork(descriptionFuture, configFuture, handler::handle));
}
/**
 * Verifies that describeConfigs for a single broker resource completes once the
 * mock client answers with a successful (empty) config response.
 */
@Test
public void testDescribeConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        final ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
                Collections.singletonMap(broker0,
                        new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptySet()))));
        final DescribeConfigsResult result = env.adminClient()
                .describeConfigs(Collections.singleton(broker0));
        // Must not throw — the prepared response carries ApiError.NONE.
        result.all().get();
    }
}
allFutures.putAll(brokerFutures); allFutures.putAll(unifiedRequestFutures); return new DescribeConfigsResult(allFutures);
env.adminClient().describeConfigs(Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, "0"))).all().get(); fail("Expected an authentication error."); } catch (ExecutionException e) {
/**
 * Retrieves the explicitly-set, writable configuration entries of the given topic.
 *
 * @param topicName name of the topic to inspect
 * @return map of config name to value, excluding default and read-only entries;
 *         empty if retrieval fails (the failure is logged, not propagated)
 */
private Map<String, String> topicConfiguration(String topicName) {
    Map<String, String> configMap = Collections.emptyMap();
    try {
        ConfigResource cr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
        DescribeConfigsResult dc = adminClient.describeConfigs(singleton(cr));
        Map<ConfigResource, Config> configs = dc.all().get();
        Config config = configs.get(cr);
        // Keep only entries that were explicitly set and can be re-applied.
        configMap = config.entries().stream()
                .filter(x -> !x.isDefault() && !x.isReadOnly())
                .collect(toMap(ConfigEntry::name, ConfigEntry::value));
        log.debug("Existing configuration for topic {} is {}", topicName, configMap);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        log.warn("Exception occurred during topic configuration retrieval. name: {}", topicName, e);
    } catch (ExecutionException e) {
        log.warn("Exception occurred during topic configuration retrieval. name: {}", topicName, e);
    }
    return configMap;
}
/**
 * Looks up the configuration of one (arbitrary) broker in the cluster, used as the
 * source of cluster-wide default settings.
 *
 * @param admin AdminClient already connected to the cluster
 * @return the broker's {@link Config}
 * @throws ConnectException if no brokers or no configs are returned
 * @throws Exception on timeout or failure of the underlying admin requests
 */
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    final Collection<Node> nodes = admin.describeCluster().nodes()
            .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (nodes.isEmpty()) {
        throw new ConnectException("No brokers available to obtain default settings");
    }
    // Any broker will do; defaults are expected to be uniform across the cluster.
    final String brokerId = nodes.iterator().next().idString();
    final ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId);
    final Map<ConfigResource, Config> brokerConfigs = admin
            .describeConfigs(Collections.singleton(brokerResource))
            .all()
            .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (brokerConfigs.isEmpty()) {
        throw new ConnectException("No configs have been received");
    }
    return brokerConfigs.values().iterator().next();
}
}
private List<ConfigItem> describeResource(final ConfigResource configResource) { final DescribeConfigsResult result = adminClient.describeConfigs(Collections.singleton(configResource)); final List<ConfigItem> configItems = new ArrayList<>(); try { final Map<ConfigResource, Config> configMap = result.all().get(); final Config config = configMap.get(configResource); for (final ConfigEntry configEntry : config.entries()) { // Skip sensitive entries if (configEntry.isSensitive()) { continue; } configItems.add( new ConfigItem(configEntry.name(), configEntry.value(), configEntry.isDefault()) ); } return configItems; } catch (InterruptedException | ExecutionException e) { // TODO Handle this throw new RuntimeException(e.getMessage(), e); } }