@Override public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options) { final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>(); // We must make a separate AlterConfigs request for every BROKER resource we want to alter // and send the request to that specific broker. Other resources are grouped together into // a single request that may be sent to any broker. final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>(); for (ConfigResource resource : configs.keySet()) { if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) { NodeProvider nodeProvider = new ConstantNodeIdProvider(Integer.parseInt(resource.name())); allFutures.putAll(alterConfigs(configs, options, Collections.singleton(resource), nodeProvider)); } else unifiedRequestResources.add(resource); } if (!unifiedRequestResources.isEmpty()) allFutures.putAll(alterConfigs(configs, options, unifiedRequestResources, new LeastLoadedNodeProvider())); return new AlterConfigsResult(new HashMap<>(allFutures)); }
@Override
protected Struct toStruct(short version) {
    // Serialize the response: throttle time plus one (error, type, name) entry
    // per altered resource, using the schema for the requested version.
    final Struct struct = new Struct(ApiKeys.ALTER_CONFIGS.responseSchema(version));
    struct.set(THROTTLE_TIME_MS, throttleTimeMs);
    final List<Struct> resourceStructs = new ArrayList<>(errors.size());
    for (final Map.Entry<ConfigResource, ApiError> entry : errors.entrySet()) {
        final Struct resourceStruct = struct.instance(RESOURCES_KEY_NAME);
        final ConfigResource resource = entry.getKey();
        // Write the per-resource error code/message first, then identify the resource.
        entry.getValue().write(resourceStruct);
        resourceStruct.set(RESOURCE_TYPE_KEY_NAME, resource.type().id());
        resourceStruct.set(RESOURCE_NAME_KEY_NAME, resource.name());
        resourceStructs.add(resourceStruct);
    }
    struct.set(RESOURCES_KEY_NAME, resourceStructs.toArray(new Struct[0]));
    return struct;
}
@Override
protected Struct toStruct() {
    // Serialize the request: one (type, name, configNames) entry per resource.
    // A null config-name list means "describe all configs" for that resource.
    final Struct struct = new Struct(ApiKeys.DESCRIBE_CONFIGS.requestSchema(version()));
    final List<Struct> resourceStructs = new ArrayList<>(resources().size());
    for (final Map.Entry<ConfigResource, Collection<String>> entry : resourceToConfigNames.entrySet()) {
        final ConfigResource resource = entry.getKey();
        final Struct resourceStruct = struct.instance(RESOURCES_KEY_NAME);
        resourceStruct.set(RESOURCE_TYPE_KEY_NAME, resource.type().id());
        resourceStruct.set(RESOURCE_NAME_KEY_NAME, resource.name());
        final Collection<String> names = entry.getValue();
        resourceStruct.set(CONFIG_NAMES_KEY_NAME,
            names == null ? null : names.toArray(new String[0]));
        resourceStructs.add(resourceStruct);
    }
    struct.set(RESOURCES_KEY_NAME, resourceStructs.toArray(new Struct[0]));
    // Only present on schema versions that support synonyms.
    struct.setIfExists(INCLUDE_SYNONYMS, includeSynonyms);
    return struct;
}
@Override
protected Struct toStruct() {
    // Serialize the request: the validate-only flag plus, for each resource,
    // its identity and the list of (name, value) config entries to apply.
    final Struct struct = new Struct(ApiKeys.ALTER_CONFIGS.requestSchema(version()));
    struct.set(VALIDATE_ONLY_KEY_NAME, validateOnly);
    final List<Struct> resourceStructs = new ArrayList<>(configs.size());
    for (final Map.Entry<ConfigResource, Config> entry : configs.entrySet()) {
        final ConfigResource resource = entry.getKey();
        final Config config = entry.getValue();
        final Struct resourceStruct = struct.instance(RESOURCES_KEY_NAME);
        resourceStruct.set(RESOURCE_TYPE_KEY_NAME, resource.type().id());
        resourceStruct.set(RESOURCE_NAME_KEY_NAME, resource.name());
        final List<Struct> entryStructs = new ArrayList<>(config.entries.size());
        for (final ConfigEntry configEntry : config.entries) {
            final Struct entryStruct = resourceStruct.instance(CONFIG_ENTRIES_KEY_NAME);
            entryStruct.set(CONFIG_NAME, configEntry.name);
            entryStruct.set(CONFIG_VALUE, configEntry.value);
            entryStructs.add(entryStruct);
        }
        resourceStruct.set(CONFIG_ENTRIES_KEY_NAME, entryStructs.toArray(new Struct[0]));
        resourceStructs.add(resourceStruct);
    }
    struct.set(RESOURCES_KEY_NAME, resourceStructs.toArray(new Struct[0]));
    return struct;
}
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    // Mock implementation: answer TOPIC config lookups synchronously from the
    // in-memory topic registry; other resource types are not supported yet.
    final Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();
    for (ConfigResource resource : resources) {
        if (resource.type() == ConfigResource.Type.TOPIC) {
            // Fail fast with a clear message instead of the bare NullPointerException
            // the unguarded allTopics.get(...) would throw for an unknown topic.
            if (!allTopics.containsKey(resource.name())) {
                throw new IllegalArgumentException("Topic " + resource.name() + " does not exist.");
            }
            Map<String, String> configs = allTopics.get(resource.name()).configs;
            List<ConfigEntry> configEntries = new ArrayList<>();
            for (Map.Entry<String, String> entry : configs.entrySet()) {
                configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
            }
            // Complete the future immediately — the mock has no network round trip.
            KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
            future.complete(new Config(configEntries));
            configDescriptions.put(resource, future);
        } else {
            throw new UnsupportedOperationException("Not implemented yet");
        }
    }
    return new DescribeConfigsResult(configDescriptions);
}
// NOTE(review): isolated statement — sets the resource-name field on a per-resource
// serialization struct; the enclosing method/loop is outside this view, so its exact
// context cannot be confirmed from here.
resourceStruct.set(RESOURCE_NAME_KEY_NAME, resource.name());
// NOTE(review): fragment of a per-broker describe path — the broker id is parsed from
// the resource name (BROKER resource names hold the broker id) and used to pin the
// "describeBrokerConfigs" call to that node via ConstantNodeIdProvider. The anonymous
// Call body opened here continues past this view, so the full behavior cannot be
// verified from this chunk alone.
final KafkaFutureImpl<Config> brokerFuture = entry.getValue(); final ConfigResource resource = entry.getKey(); final int nodeId = Integer.parseInt(resource.name()); runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {