@Test
public void shouldImplementHashCodeProperly() {
    final Collection<ConfigEntry> entries = new ArrayList<>();
    entries.add(E1);

    assertThat(config.hashCode(), is(config.hashCode()));
    assertThat(config.hashCode(), is(new Config(config.entries()).hashCode()));
    assertThat(config.hashCode(), is(not(new Config(entries).hashCode())));
}
@Test
public void shouldImplementEqualsProperly() {
    final Collection<ConfigEntry> entries = new ArrayList<>();
    entries.add(E1);

    assertThat(config, is(equalTo(config)));
    assertThat(config, is(equalTo(new Config(config.entries()))));
    assertThat(config, is(not(equalTo(new Config(entries)))));
    assertThat(config, is(not(equalTo((Object) "this"))));
}
@Before
public void setUp() {
    final Collection<ConfigEntry> entries = new ArrayList<>();
    entries.add(E1);
    entries.add(E2);

    config = new Config(entries);
}
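// The tests above assume fixtures along these lines. This is a hypothetical
// sketch: the names E1, E2, and the config field come from the snippets, but
// the entry keys and values here are illustrative only.
private static final ConfigEntry E1 = new ConfigEntry("entry.one", "value-1");
private static final ConfigEntry E2 = new ConfigEntry("entry.two", "value-2");

private Config config;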
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();
    for (ConfigResource resource : resources) {
        if (resource.type() == ConfigResource.Type.TOPIC) {
            Map<String, String> configs = allTopics.get(resource.name()).configs;
            List<ConfigEntry> configEntries = new ArrayList<>();
            for (Map.Entry<String, String> entry : configs.entrySet()) {
                configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
            }
            // Complete the future immediately; this mock never performs a broker round trip.
            KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
            future.complete(new Config(configEntries));
            configDescriptions.put(resource, future);
        } else {
            throw new UnsupportedOperationException("Not implemented yet");
        }
    }
    return new DescribeConfigsResult(configDescriptions);
}
// Fragment: translate each resource's desired Config into the wire-format
// entries expected by AlterConfigsRequest.
for (ConfigResource resource : resources) {
    List<AlterConfigsRequest.ConfigEntry> configEntries = new ArrayList<>();
    for (ConfigEntry configEntry : configs.get(resource).entries()) {
        configEntries.add(new AlterConfigsRequest.ConfigEntry(configEntry.name(), configEntry.value()));
    }
    requestMap.put(resource, new AlterConfigsRequest.Config(configEntries));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
    DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
        ConfigResource configResource = entry.getKey();
        KafkaFutureImpl<Config> future = entry.getValue();
        DescribeConfigsResponse.Config config = response.config(configResource);
        if (config == null) {
            future.completeExceptionally(new UnknownServerException(
                    "Malformed broker response: missing config for " + configResource));
            continue;
        }
        if (config.error().isFailure()) {
            future.completeExceptionally(config.error().exception());
            continue;
        }
        List<ConfigEntry> configEntries = new ArrayList<>();
        for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
            configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(),
                    configSource(configEntry.source()), configEntry.isSensitive(),
                    configEntry.isReadOnly(), configSynonyms(configEntry)));
        }
        future.complete(new Config(configEntries));
    }
}
private Map<String, String> topicConfiguration(String topicName) {
    Map<String, String> configMap = Collections.emptyMap();
    try {
        ConfigResource cr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
        DescribeConfigsResult dc = adminClient.describeConfigs(singleton(cr));
        Map<ConfigResource, Config> configs = dc.all().get();
        Config config = configs.get(cr);
        // Keep only explicit, writable overrides; drop defaults and read-only entries.
        configMap = config.entries().stream()
                .filter(x -> !x.isDefault() && !x.isReadOnly())
                .collect(toMap(ConfigEntry::name, ConfigEntry::value));
        log.debug("Existing configuration for topic {} is {}", topicName, configMap);
    } catch (InterruptedException | ExecutionException e) {
        log.warn("Exception occurred during topic configuration retrieval. name: {}", topicName, e);
    }
    return configMap;
}
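// Hypothetical usage of topicConfiguration above: diff the live overrides
// against a desired set before deciding whether an alter call is needed.
// Both "my-topic" and desiredProperties are illustrative names.
Map<String, String> current = topicConfiguration("my-topic");
if (!current.equals(desiredProperties)) {
    log.info("Topic configuration drift detected: {} -> {}", current, desiredProperties);
}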
@Test
public void shouldReturnNullOnGetUnknownEntry() {
    assertThat(config.get("unknown"), is(nullValue()));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
    DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
    DescribeConfigsResponse.Config config = response.configs().get(resource);
    if (config == null) {
        brokerFuture.completeExceptionally(new UnknownServerException(
                "Malformed broker response: missing config for " + resource));
        return;
    }
    if (config.error().isFailure()) {
        brokerFuture.completeExceptionally(config.error().exception());
    } else {
        List<ConfigEntry> configEntries = new ArrayList<>();
        for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
            configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(),
                    configSource(configEntry.source()), configEntry.isSensitive(),
                    configEntry.isReadOnly(), configSynonyms(configEntry)));
        }
        brokerFuture.complete(new Config(configEntries));
    }
}
private List<ConfigItem> describeResource(final ConfigResource configResource) {
    final DescribeConfigsResult result = adminClient.describeConfigs(Collections.singleton(configResource));
    final List<ConfigItem> configItems = new ArrayList<>();
    try {
        final Map<ConfigResource, Config> configMap = result.all().get();
        final Config config = configMap.get(configResource);
        for (final ConfigEntry configEntry : config.entries()) {
            // Skip sensitive entries
            if (configEntry.isSensitive()) {
                continue;
            }
            configItems.add(new ConfigItem(configEntry.name(), configEntry.value(), configEntry.isDefault()));
        }
        return configItems;
    } catch (InterruptedException | ExecutionException e) {
        // TODO Handle this
        throw new RuntimeException(e.getMessage(), e);
    }
}
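// Hypothetical usage of describeResource above: the same helper works for
// topics and brokers, since the resource type travels with the ConfigResource.
// The broker id "0" and topic name "my-topic" are illustrative.
List<ConfigItem> brokerItems = describeResource(new ConfigResource(ConfigResource.Type.BROKER, "0"));
List<ConfigItem> topicItems = describeResource(new ConfigResource(ConfigResource.Type.TOPIC, "my-topic"));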
@Override
public void initializeStorage() {
    super.initializeStorage();
    try (AdminClient admin = AdminClient.create(this.producerConfig.asProperties())) {
        // Find the broker's default replication factor
        Config brokerConfig = getKafkaBrokerConfig(admin);
        final short replicationFactor = Short.parseShort(
                brokerConfig.get(DEFAULT_TOPIC_REPLICATION_FACTOR_PROP_NAME).value());

        // Create a single-partition topic with unbounded retention for the history
        final NewTopic topic = new NewTopic(topicName, 1, replicationFactor);
        topic.configs(Collect.hashMapOf(
                "cleanup.policy", "delete",
                "retention.ms", Long.toString(Long.MAX_VALUE),
                "retention.bytes", "-1"));
        admin.createTopics(Collections.singleton(topic));
        logger.info("Database history topic '{}' created", topicName);
    } catch (Exception e) {
        throw new ConnectException("Creation of database history topic failed, please create the topic manually", e);
    }
}
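// The getKafkaBrokerConfig helper referenced above is not shown in the snippet.
// A minimal sketch of what it might do, assuming any live broker's defaults are
// representative: pick one node and describe its BROKER config resource.
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    String brokerId = admin.describeCluster().nodes().get().iterator().next().idString();
    ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, brokerId);
    return admin.describeConfigs(Collections.singleton(resource)).all().get().get(resource);
}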
/**
 * Return a singleton map from the topic {@link ConfigResource} for the given topic,
 * to the {@link Config} of the given topic.
 */
public static Map<ConfigResource, Config> toTopicConfig(Topic topic) {
    Set<ConfigEntry> configEntries = new HashSet<>();
    for (Map.Entry<String, String> entry : topic.getConfig().entrySet()) {
        configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
    }
    Config config = new Config(configEntries);
    return Collections.singletonMap(
            new ConfigResource(ConfigResource.Type.TOPIC, topic.getTopicName().toString()),
            config);
}
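// A minimal usage sketch for toTopicConfig, assuming a Topic instance and an
// AdminClient are in scope: the returned map plugs straight into alterConfigs.
Map<ConfigResource, Config> alteration = toTopicConfig(topic);
adminClient.alterConfigs(alteration).all().get();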
/**
 * Create a Topic to reflect the given TopicMetadata.
 */
public static Topic fromTopicMetadata(TopicMetadata meta) {
    if (meta == null) {
        return null;
    }
    Topic.Builder builder = new Topic.Builder()
            .withTopicName(meta.getDescription().name())
            .withNumPartitions(meta.getDescription().partitions().size())
            .withNumReplicas((short) meta.getDescription().partitions().get(0).replicas().size())
            .withMetadata(null);
    for (ConfigEntry entry : meta.getConfig().entries()) {
        if (!entry.isDefault()) {
            builder.withConfigEntry(entry.name(), entry.value());
        }
    }
    return builder.build();
}
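// Hypothetical round trip combining the two helpers above: rebuild a Topic
// from live metadata, then derive the config map that would reapply its
// non-default settings (meta is assumed to come from a describe call).
Topic topic = fromTopicMetadata(meta);
Map<ConfigResource, Config> desiredConfig = toTopicConfig(topic);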
/**
 * Modify configuration values for a specific topic.
 * @param topic The topic to modify.
 * @param configItems Map of configuration key to new value.
 * @return The updated TopicConfig.
 */
public TopicConfig alterTopicConfig(final String topic, final Map<String, String> configItems) {
    try {
        // Define the resource we want to modify: the topic.
        final ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);

        final List<ConfigEntry> configEntries = new ArrayList<>();
        for (final Map.Entry<String, String> entry : configItems.entrySet()) {
            configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
        }

        // Define the configuration set
        final Config config = new Config(configEntries);

        // Apply the new configuration to the topic
        final AlterConfigsResult result = adminClient.alterConfigs(Collections.singletonMap(configResource, config));

        // Wait for the async request to process.
        result.all().get();

        // Return the updated topic details
        return getTopicConfig(topic);
    } catch (final InterruptedException | ExecutionException exception) {
        // TODO Handle this
        throw new RuntimeException(exception.getMessage(), exception);
    }
}
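// Hypothetical usage of alterTopicConfig above: tighten retention on a topic.
// The topic name and values are illustrative.
Map<String, String> changes = new HashMap<>();
changes.put("retention.ms", "86400000");  // keep data for one day
changes.put("cleanup.policy", "delete");
TopicConfig updated = alterTopicConfig("my-topic", changes);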
private void updateTopic(Topic topic) {
    log.debug("Topic exists. name {}", topic.getName());
    TopicWithParitions oldTopic = topic(topic.getName());
    if (topic.getPartitions() > oldTopic.getPartitions()) {
        adminClient.createPartitions(singletonMap(topic.getName(), NewPartitions.increaseTo(topic.getPartitions())));
        changedTopics.inc();
        log.info("Updated topic. name: {}, new partitions: {}", topic.getName(), topic.getPartitions());
    } else if (topic.getPartitions() < oldTopic.getPartitions()) {
        log.warn("Unable to reduce number of partitions. name: {}, requested partitions: {}, original partitions: {}",
                topic.getName(), topic.getPartitions(), oldTopic.getPartitions());
    }
    if (topic.getReplicationFactor() != 0 && topic.getReplicationFactor() != oldTopic.getReplicationFactor()) {
        log.error("Replication factor change not supported. name: {}, requested replication-factor: {}, original replication-factor: {}",
                topic.getName(), topic.getReplicationFactor(), oldTopic.getReplicationFactor());
    }
    // Only alter configs when properties are present and differ from the current ones.
    if (topic.getProperties() != null && !topic.getProperties().equals(oldTopic.getProperties())) {
        log.info("Updating topic properties. name: {}, new properties: {}", topic.getName(), topic.getProperties());
        ConfigResource cr = new ConfigResource(ConfigResource.Type.TOPIC, topic.getName());
        List<ConfigEntry> entries = new ArrayList<>();
        topic.getProperties().forEach((k, v) -> entries.add(new ConfigEntry(k.toString(), v.toString())));
        adminClient.alterConfigs(singletonMap(cr, new Config(entries)));
        changedTopics.inc();
    } else {
        log.debug("Topic properties are unchanged. name: {}, new properties={}, old properties={}",
                topic.getName(), topic.getProperties(), oldTopic.getProperties());
    }
}
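// Note: alterConfigs is deprecated in newer Kafka clients in favour of
// incrementalAlterConfigs (KIP-339), which sets individual keys instead of
// replacing the whole config. A minimal sketch, assuming the cr and entries
// built above and a client version of 2.3 or later:
Collection<AlterConfigOp> ops = new ArrayList<>();
for (ConfigEntry entry : entries) {
    ops.add(new AlterConfigOp(entry, AlterConfigOp.OpType.SET));
}
adminClient.incrementalAlterConfigs(singletonMap(cr, ops)).all().get();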