/**
 * Assigns the supplied {@link Properties} as the complete config of the given topic. Existing config entries that
 * are absent from {@code properties} are dropped — this is a full replacement, not a merge.
 *
 * @param topic
 *          the topic whose config will be replaced
 * @param properties
 *          the new config to assign to the topic
 * @throws IllegalArgumentException
 *           if topic is null, empty or blank, or properties is {@code null}
 * @throws AdminOperationException
 *           if there is an issue updating the topic config
 */
public void updateTopicConfig(String topic, Properties properties) {
    if (StringUtils.isBlank(topic)) {
        throw new IllegalArgumentException("topic cannot be null, empty or blank");
    }
    if (properties == null) {
        throw new IllegalArgumentException("properties cannot be null");
    }

    LOG.debug("Updating topic config for topic [{}] with config [{}]", topic, properties);

    try {
        AdminUtils.changeTopicConfig(zkUtils, topic, properties);
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to update configuration for topic: " + topic, zke);
    }
}
/**
 * Looks up the {@link AdminClient.ConsumerGroupSummary} for the named consumer group.
 *
 * @param consumerGroup
 *          the name of the consumer group
 * @return the {@link AdminClient.ConsumerGroupSummary} information from Kafka
 * @throws IllegalArgumentException
 *           if consumerGroup is null, empty or blank
 * @throws AdminOperationException
 *           if there is an issue retrieving the consumer group summary
 */
public AdminClient.ConsumerGroupSummary getConsumerGroupSummary(String consumerGroup) {
    if (StringUtils.isBlank(consumerGroup)) {
        throw new IllegalArgumentException("consumerGroup cannot be null, empty or blank");
    }

    try {
        return getAdminClient().describeConsumerGroup(consumerGroup);
    } catch (KafkaException ke) {
        throw new AdminOperationException("Unable to retrieve summary for consumer group: " + consumerGroup, ke);
    }
}
Thread.sleep(operationSleep); } catch (InterruptedException e) { throw new AdminOperationException("Interrupted waiting for topic " + topic + " to be deleted", e); throw new AdminOperationException("Timeout waiting for topic " + topic + " to be deleted"); LOG.warn("Topic [{}] to be deleted was not found", topic, e); } catch (ZkException e) { throw new AdminOperationException("Unable to delete topic: " + topic, e);
/**
 * Fetches the topic-level {@link Properties} currently associated with the given topic.
 *
 * @param topic
 *          a Kafka topic
 * @return the {@link Properties} associated to the topic
 * @throws IllegalArgumentException
 *           if topic is null, empty or blank
 * @throws AdminOperationException
 *           if there is an issue reading the topic config
 */
public Properties getTopicConfig(String topic) {
    if (StringUtils.isBlank(topic)) {
        throw new IllegalArgumentException("topic cannot be null, empty or blank");
    }

    LOG.debug("Fetching topic config for topic [{}]", topic);

    try {
        return AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
    } catch (ZkException | KafkaException | IllegalArgumentException fetchError) {
        throw new AdminOperationException("Unable to retrieve configuration for topic: " + topic, fetchError);
    }
}
/** * Returns the set of all partitions in the Kafka cluster * * @return unmodifiable set of all partitions in the Kafka cluster * @throws AdminOperationException * if there is an issue reading partitions from Kafka */ public Set<TopicAndPartition> getPartitions() { LOG.debug("Retrieving all partitions"); long start = System.currentTimeMillis(); do { // The zkUtils.getAllPartitions(..) can throw ZkNoNodeException if a topic's partitions have not been added to // zookeeper but the topic is listed in zookeeper. Any other zookeeper exception is assumed to be non-transient and // will be rethrown. try { return Collections.unmodifiableSet(convertToJavaSet(zkUtils.getAllPartitions().iterator())); } catch (ZkNoNodeException e) { LOG.debug("Reading partitions had an error", e); } catch (ZkException e) { throw new AdminOperationException("Unable to retrieve all partitions", e); } LOG.debug("Sleeping for {} ms before trying to get all partitions again", operationSleep); try { Thread.sleep(operationSleep); } catch (InterruptedException e) { throw new AdminOperationException("Interrupted while getting partitions", e); } } while (!operationTimedOut(start)); throw new AdminOperationException("Operation timed out trying to get partitions"); }
throw new AdminOperationException("Unable to add partitions to topic: " + topic, e); Thread.sleep(operationSleep); } catch (InterruptedException e) { throw new AdminOperationException("Interrupted waiting for partitions to be added to topic: " + topic, e); throw new AdminOperationException("Timeout waiting for partitions to be added to topic: " + topic);
/**
 * Returns every topic currently known to the Kafka cluster.
 *
 * @return unmodifiable set of all topics in the Kafka cluster
 * @throws AdminOperationException
 *           if there is an issue retrieving the set of all topics
 */
public Set<String> getTopics() {
    LOG.debug("Retrieving all topics");

    try {
        Set<String> allTopics = convertToJavaSet(zkUtils.getAllTopics().iterator());
        return Collections.unmodifiableSet(allTopics);
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to retrieve all topics", zke);
    }
}
/**
 * Returns the replication factor for the given topic, derived from the replica assignment of partition 0.
 *
 * @param topic
 *          a Kafka topic
 * @return the replication factor for the given topic
 * @throws IllegalArgumentException
 *           if topic is null, empty or blank
 * @throws AdminOperationException
 *           if there is an issue retrieving the replication factor
 */
public int getTopicReplicationFactor(String topic) {
    if (StringUtils.isBlank(topic)) {
        throw new IllegalArgumentException("topic cannot be null, empty or blank");
    }

    // Log the lookup at debug level, consistent with the other read operations in this class
    LOG.debug("Fetching replication factor for topic [{}]", topic);

    try {
        // NOTE(review): if the topic does not exist, the replica list is empty and this returns 0 rather than
        // failing — confirm callers expect that
        return convertToJavaSet(zkUtils.getReplicasForPartition(topic, 0).iterator()).size();
    } catch (ZkException | KafkaException e) {
        throw new AdminOperationException("Unable to read replication factor for topic: " + topic, e);
    }
}
throw new AdminOperationException("number of partitions must be larger than 0"); throw new AdminOperationException("replication factor must be larger than 0");
/**
 * Deletes the given {@link Acl}s from the {@link Resource}.
 *
 * @param acls
 *          the {@link Acl}s to remove
 * @param resource
 *          the {@link Resource} to remove the {@link Acl}s from
 * @throws IllegalArgumentException
 *           if acls or resource is {@code null}
 * @throws AdminOperationException
 *           if there is an issue removing the {@link Acl}s
 */
public void removeAcls(Set<Acl> acls, Resource resource) {
    if (acls == null) {
        throw new IllegalArgumentException("acls cannot be null");
    }
    if (resource == null) {
        throw new IllegalArgumentException("resource cannot be null");
    }

    LOG.debug("Removing ACLs [{}] for resource [{}]", acls, resource);

    try {
        getAuthorizer().removeAcls(toImmutableScalaSet(acls), resource);
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to remove ACLs for resource: " + resource, zke);
    }
}
AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor, topicConfig, RackAwareMode.Disabled$.MODULE$); } catch (ZkException e) { throw new AdminOperationException("Unable to create topic: " + topic, e); Thread.sleep(operationSleep); } catch (InterruptedException e) { throw new AdminOperationException("Interrupted waiting for topic " + topic + " to be created", e); throw new AdminOperationException("Timeout waiting for topic " + topic + " to be created");
/**
 * Grants the given {@link Acl}s on the {@link Resource}.
 *
 * @param acls
 *          the {@link Acl}s to add
 * @param resource
 *          the {@link Resource} to add the {@link Acl}s to
 * @throws IllegalArgumentException
 *           if acls or resource is {@code null}
 * @throws AdminOperationException
 *           if there is an issue adding the {@link Acl}s
 */
public void addAcls(Set<Acl> acls, Resource resource) {
    if (acls == null) {
        throw new IllegalArgumentException("acls cannot be null");
    }
    if (resource == null) {
        throw new IllegalArgumentException("resource cannot be null");
    }

    LOG.debug("Adding ACLs [{}] for resource [{}]", acls, resource);

    try {
        getAuthorizer().addAcls(toImmutableScalaSet(acls), resource);
    } catch (ZkException | IllegalStateException addError) {
        throw new AdminOperationException("Unable to add ACLs for resource: " + resource, addError);
    }
}
/**
 * Looks up every {@link Acl} associated with the given {@link KafkaPrincipal}, keyed by {@link Resource}.
 *
 * @param principal
 *          the {@link KafkaPrincipal} to look up {@link Acl}s for
 * @return unmodifiable map of all {@link Acl}s associated to the given {@link KafkaPrincipal}
 * @throws IllegalArgumentException
 *           if principal is {@code null}
 * @throws AdminOperationException
 *           if there is an issue reading the {@link Acl}s
 */
public Map<Resource, Set<Acl>> getAcls(KafkaPrincipal principal) {
    if (principal == null) {
        throw new IllegalArgumentException("principal cannot be null");
    }

    LOG.debug("Fetching all ACLs for principal [{}]", principal);

    try {
        return convertKafkaAclMap(getAuthorizer().getAcls(principal));
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to retrieve ACLs for principal: " + principal, zke);
    }
}
javaMap = convertToJavaMap(zkUtils.getPartitionsForTopics(new Set1<String>(topic).toSeq()).iterator()); } catch (ZkException | KafkaException e) { throw new AdminOperationException("Unable to retrieve number of partitions for topic: " + topic, e); throw new AdminOperationException("Failed to find any partitions for topic: " + topic); throw new AdminOperationException("Partition count is 0 for topic: " + topic);
/**
 * Retrieves every {@link Acl} defined in the Kafka cluster, keyed by {@link Resource}.
 *
 * @return unmodifiable map of all {@link Acl}s defined in the Kafka cluster
 * @throws AdminOperationException
 *           if there is an issue reading the {@link Acl}s
 */
public Map<Resource, Set<Acl>> getAcls() {
    LOG.debug("Fetching all ACLs");

    try {
        return convertKafkaAclMap(getAuthorizer().getAcls());
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to retrieve all ACLs", zke);
    }
}
/**
 * Looks up every {@link Acl} attached to the given {@link Resource}.
 *
 * @param resource
 *          the {@link Resource} to look up {@link Acl}s for
 * @return unmodifiable set of all {@link Acl}s associated to the given {@link Resource}
 * @throws IllegalArgumentException
 *           if resource is {@code null}
 * @throws AdminOperationException
 *           if there is an issue reading the {@link Acl}s
 */
public Set<Acl> getAcls(Resource resource) {
    if (resource == null) {
        throw new IllegalArgumentException("resource cannot be null");
    }

    LOG.debug("Fetching all ACLs for resource [{}]", resource);

    try {
        Set<Acl> resourceAcls = convertToJavaSet(getAuthorizer().getAcls(resource).iterator());
        return Collections.unmodifiableSet(resourceAcls);
    } catch (ZkException zke) {
        throw new AdminOperationException("Unable to retrieve ACLs for resource: " + resource, zke);
    }
}
return Collections.emptyList(); } catch (KafkaException e) { throw new AdminOperationException("Unable to retrieve summaries for consumer group: " + consumerGroup, e);
/**
 * Creates a {@link ZkUtils} connection from the supplied {@link Properties}.
 *
 * <p>Zookeeper connection settings are read via {@link ZKConfig}; the {@code ZOOKEEPER_SECURE} property
 * (defaulting to {@code DEFAULT_ZOOKEEPER_SECURE}) controls whether the connection is created as secure.
 *
 * @param properties
 *          the connection configuration
 * @return a new {@link ZkUtils} instance
 * @throws IllegalArgumentException
 *           if properties is {@code null}
 * @throws AdminOperationException
 *           if the zookeeper client and connection cannot be created
 */
private static ZkUtils getZkUtils(Properties properties) {
    if (properties == null) {
        throw new IllegalArgumentException("properties cannot be null");
    }

    Tuple2<ZkClient, ZkConnection> tuple;
    try {
        ZKConfig zkConfig = new ZKConfig(new VerifiableProperties(properties));
        tuple = ZkUtils.createZkClientAndConnection(zkConfig.zkConnect(), zkConfig.zkSessionTimeoutMs(),
                zkConfig.zkConnectionTimeoutMs());
    } catch (ZkException e) {
        throw new AdminOperationException("Unable to create admin connection", e);
    }

    // parseBoolean yields the primitive directly, avoiding the needless boxing of Boolean.valueOf
    boolean isSecure = Boolean.parseBoolean(properties.getProperty(ZOOKEEPER_SECURE, DEFAULT_ZOOKEEPER_SECURE));
    return new ZkUtils(tuple._1(), tuple._2(), isSecure);
}
/**
 * Lazily creates, configures, and caches an {@link Authorizer} for making {@link Acl} requests.
 *
 * @return an {@link Authorizer} to make {@link Acl} requests
 * @throws AdminOperationException
 *           if there is an issue creating the authorizer
 */
public Authorizer getAuthorizer() {
    if (authorizer == null) {
        // NOTE(review): this lazy initialization is unsynchronized; concurrent first calls could each build an
        // authorizer — confirm this class is used single-threaded
        ZKConfig zkConfig = new ZKConfig(new VerifiableProperties(properties));

        Map<String, Object> authorizerProps = new HashMap<>();
        authorizerProps.put(ZKConfig.ZkConnectProp(), zkConfig.zkConnect());
        authorizerProps.put(ZKConfig.ZkConnectionTimeoutMsProp(), zkConfig.zkConnectionTimeoutMs());
        authorizerProps.put(ZKConfig.ZkSessionTimeoutMsProp(), zkConfig.zkSessionTimeoutMs());
        authorizerProps.put(ZKConfig.ZkSyncTimeMsProp(), zkConfig.zkSyncTimeMs());

        try {
            SimpleAclAuthorizer aclAuthorizer = new SimpleAclAuthorizer();
            aclAuthorizer.configure(authorizerProps);
            authorizer = aclAuthorizer;
        } catch (ZkException zke) {
            throw new AdminOperationException("Unable to create authorizer", zke);
        }
    }
    return authorizer;
}