public void provisionTopic(String topic) {
  if (_topicConsumerMap.containsKey(topic)) {
    // nothing to do:
    return;
  } else {
    // provision topic
    AdminUtils.createTopic(_kafkaServerSuite.getZkClient(), topic, 1, 1, new Properties());
    List<KafkaServer> servers = new ArrayList<>();
    servers.add(_kafkaServerSuite.getKafkaServer());
    kafka.utils.TestUtils.waitUntilMetadataIsPropagated(
        scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    KafkaConsumerSuite consumerSuite =
        new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
    _topicConsumerMap.put(topic, consumerSuite);
  }
}
@Override
public void deleteTestTopic(String topic) {
  ZkUtils zkUtils = getZkUtils();
  try {
    LOG.info("Deleting topic {}", topic);
    ZkClient zk = new ZkClient(zookeeperConnectionString,
        Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
        Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")),
        new ZooKeeperStringSerializer());
    AdminUtils.deleteTopic(zkUtils, topic);
    zk.close();
  } finally {
    zkUtils.close();
  }
}
/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param topic topic to create (if not already existing)
 * @param partitions number of topic partitions
 * @param topicProperties optional topic config properties
 */
public static void maybeCreateTopic(String zkServers, String topic, int partitions, Properties topicProperties) {
  ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
  try {
    if (AdminUtils.topicExists(zkUtils, topic)) {
      log.info("No need to create topic {} as it already exists", topic);
    } else {
      log.info("Creating topic {} with {} partition(s)", topic, partitions);
      try {
        AdminUtils.createTopic(zkUtils, topic, partitions, 1, topicProperties, RackAwareMode.Enforced$.MODULE$);
        log.info("Created topic {}", topic);
      } catch (TopicExistsException re) {
        log.info("Topic {} already exists", topic);
      }
    }
  } finally {
    zkUtils.close();
  }
}
/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param topic topic to delete, if it exists
 */
public static void deleteTopic(String zkServers, String topic) {
  ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
  try {
    if (AdminUtils.topicExists(zkUtils, topic)) {
      log.info("Deleting topic {}", topic);
      AdminUtils.deleteTopic(zkUtils, topic);
      log.info("Deleted Zookeeper topic {}", topic);
    } else {
      log.info("No need to delete topic {} as it does not exist", topic);
    }
  } finally {
    zkUtils.close();
  }
}
private void ensureTopicCreated(ZkUtils zkUtils, Set<String> allTopics, String topic, long retentionMs,
                                int replicationFactor, int partitionCount) {
  Properties props = new Properties();
  props.setProperty(LogConfig.RetentionMsProp(), Long.toString(retentionMs));
  props.setProperty(LogConfig.CleanupPolicyProp(), DEFAULT_CLEANUP_POLICY);
  if (!allTopics.contains(topic)) {
    AdminUtils.createTopic(zkUtils, topic, partitionCount, replicationFactor, props, RackAwareMode.Safe$.MODULE$);
  } else {
    try {
      AdminUtils.changeTopicConfig(zkUtils, topic, props);
      MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(
          JavaConversions.asScalaSet(Collections.singleton(topic)), zkUtils,
          ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).head();
      maybeIncreaseTopicReplicationFactor(zkUtils, topicMetadata, replicationFactor, topic);
      maybeIncreaseTopicPartitionCount(zkUtils, topic, topicMetadata, partitionCount);
    } catch (RuntimeException re) {
      LOG.error("Skip updating topic " + topic + " configuration due to failure:" + re.getMessage() + ".");
    }
  }
}
public static void createTopicHelper(final String topicName, final int partitions) {
  Properties topicProps = new Properties();
  topicProps.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime");
  topicProps.put(TopicConfig.RETENTION_MS_CONFIG, "-1");
  ZkUtils zkUtils = new ZkUtils(zkClient,
      new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
  AdminUtils.createTopic(zkUtils, topicName, partitions, 1, topicProps, RackAwareMode.Disabled$.MODULE$);
  org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk =
      AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
  logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
}
Properties props = new Properties();
props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, topicReplicationCount);
props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, topicPartitionCount);
props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, liveZookeeper);
// The ZkClient construction was cut off in this excerpt; the timeout values below are assumed.
ZkClient zkClient = new ZkClient(liveZookeeper, 30000, 30000, ZKStringSerializer$.MODULE$);
boolean isSecureKafkaCluster = false;
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);
// Capture the fetched metadata so the assertion below can use it
// (declared type assumed to be kafka.api.TopicMetadata, which exposes partitionsMetadata()).
TopicMetadata metaData = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
Assert.assertEquals(metaData.partitionsMetadata().size(), Integer.parseInt(topicPartitionCount));
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {
  startServer();
  this.topic = topic;
  AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnect);
  consumeProps.put("group.id", "testConsumer");
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("consumer.timeout.ms", "10000");

  consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(this.topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
  stream = streams.get(0);
  iterator = stream.iterator();
}
@Before
public void setUp() throws IOException, SQLException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Broker
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
  kafkaServer.startup();

  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties());

  pConsumer = new PhoenixConsumer();

  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  conn = DriverManager.getConnection(getUrl(), props);
}
public void addTopic(String zookeeperUri, KafkaTopic topic) {
  ZkClient zkClient = getZkClient(zookeeperUri);
  Properties topicConfig = new Properties();
  for (Entry<String, String> property : topic.getConfig().entrySet()) {
    topicConfig.setProperty(property.getKey(), property.getValue());
  }
  AdminUtils.createTopic(zkClient, topic.getName(), topic.getNumPartitions(), topic.getReplicationFactor(), topicConfig);
}
public static void checkAndCreateTopic(String zkConnect, String topic, int replicas) {
  ZkClient zkClient = new ZkClient(zkConnect, SESSION_TIMEOUT_MS, CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zkConnect), IS_SECURE_KAFKA_CLUSTER);
  try {
    if (AdminUtils.topicExists(zkUtils, topic)) {
      verifyTopic(zkUtils, topic);
      return;
    }
    int partitions = 1;
    Properties topicConfig = new Properties();
    topicConfig.put(LogConfig.CleanupPolicyProp(), "compact");
    AdminUtils.createTopic(zkUtils, topic, partitions, replicas, topicConfig, RackAwareMode.Enforced$.MODULE$);
  } finally {
    // close in a finally block so the ZooKeeper connections are released even on the early return above
    zkClient.close();
    zkUtils.close();
  }
}
public void provisionTopic(String topic) {
  if (_topicConsumerMap.containsKey(topic)) {
    // nothing to do:
    return;
  } else {
    // provision topic
    AdminUtils.createTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false), topic, 1, 1, new Properties());
    List<KafkaServer> servers = new ArrayList<>();
    servers.add(_kafkaServerSuite.getKafkaServer());
    kafka.utils.TestUtils.waitUntilMetadataIsPropagated(
        scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    KafkaConsumerSuite consumerSuite =
        new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
    _topicConsumerMap.put(topic, consumerSuite);
  }
}
@Override
public void start() {
  ZkUtils zkUtils = new ZkUtils(
      new ZkClient(this.zkAddress, 6000, 6000, ZKStringSerializer$.MODULE$), null, false);
  try {
    AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
  } catch (TopicExistsException e) {
    log.info("Topic: {} already exists.", topic);
  }
  this.running = true;
}
private void createSchemaTopic() throws StoreInitializationException {
  if (AdminUtils.topicExists(zkUtils, topic)) {
    verifySchemaTopic();
    return;
  }
  int numLiveBrokers = brokerSeq.size();
  if (numLiveBrokers <= 0) {
    throw new StoreInitializationException("No live Kafka brokers");
  }
  int schemaTopicReplicationFactor = Math.min(numLiveBrokers, desiredReplicationFactor);
  if (schemaTopicReplicationFactor < desiredReplicationFactor) {
    log.warn("Creating the schema topic " + topic + " using a replication factor of "
        + schemaTopicReplicationFactor + ", which is less than the desired one of "
        + desiredReplicationFactor + ". If this is a production environment, it's "
        + "crucial to add more brokers and increase the replication factor of the topic.");
  }
  Properties schemaTopicProps = new Properties();
  schemaTopicProps.put(LogConfig.CleanupPolicyProp(), "compact");
  try {
    AdminUtils.createTopic(zkUtils, topic, 1, schemaTopicReplicationFactor, schemaTopicProps,
        RackAwareMode.Enforced$.MODULE$);
  } catch (TopicExistsException e) {
    // This is ok.
  }
}
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), false);
int partitions = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.PARTITION_COUNT,
    KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
int replication = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.REPLICATION_COUNT,
    KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
Properties topicConfig = new Properties();
try {
  if (AdminUtils.topicExists(zkUtils, topicName)) {
    log.debug("Topic " + topicName + " already exists with replication: " + replication
        + " and partitions: " + partitions);
    return;
  }
  AdminUtils.createTopic(zkUtils, topicName, partitions, replication, topicConfig);
} catch (RuntimeException e) {
  throw new RuntimeException(e);
}
private void updateTopic(ZkUtils zkUtils, String topic, Map<String, String> configMap) {
  Properties topicProperties = new Properties();
  topicProperties.putAll(configMap);
  AdminUtils.changeTopicConfig(zkUtils, topic, topicProperties);
}
@Override
public void changeTopicConfig(String topicName, Map<String, String> topicConfig,
                              Handler<AsyncResult<Void>> completionHandler) {
  Properties topicConfigProperties = new Properties();
  topicConfigProperties.putAll(topicConfig);
  vertx.executeBlocking(future -> {
    try {
      kafka.admin.AdminUtils.changeTopicConfig(initZkClientAndGetZkUtils(), topicName, topicConfigProperties);
      completionHandler.handle(Future.succeededFuture());
    } catch (Exception e) {
      completionHandler.handle(Future.failedFuture(e.getLocalizedMessage()));
    } finally {
      if (autoClose) {
        zkUtils.close();
      }
    }
  }, r -> { });
}
private void createTopic(String topic, int partitions, int replicationFactor) {
  if (!AdminUtils.topicExists(zkUtils, topic)) {
    AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor, new Properties(),
        RackAwareMode.Enforced$.MODULE$);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    logger.info("created topic " + topic + " with " + partitions + " partitions and "
        + replicationFactor + " replicas");
  }
}
Properties topicConfig = new Properties();
for (Map.Entry<String, Integer> entry : topicParallelismMap.entrySet()) {
  String topic = entry.getKey();
  Integer partitions = entry.getValue();
  if (AdminUtils.topicExists(zkUtils, topic)) {
    int existingPartitions = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata().size();
    if (existingPartitions < partitions) {
      log.info("Topic " + topic + " has a lower number of partitions than the expected partition count. "
          + "Hence have to delete the topic and recreate with " + partitions + " partitions.");
      AdminUtils.deleteTopic(zkUtils, topic);
      // Wait for the deletion to complete before recreating the topic.
      // (The excerpt is truncated here; the timeout handling around startTime is not shown,
      // and the catch block below is assumed.)
      long startTime = System.currentTimeMillis();
      while (AdminUtils.topicExists(zkUtils, topic)) {
        try {
          TimeUnit.SECONDS.sleep(1);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          break;
        }
      }
    }
  }
  if (!AdminUtils.topicExists(zkUtils, topic)) {
    (new SafeKafkaInvoker()).createKafkaTopic(bootstrapServerURLs, zkUtils, topicConfig, topic, partitions);
  }
}
public boolean deleteTopicConfByKey(@TopicExistConstraint String topic, String key) {
  Properties configs = getTopicPropsFromZk(topic);
  configs.remove(key);
  AdminUtils.changeTopicConfig(zkUtils, topic, configs);
  return getTopicPropsFromZk(topic).get(key) == null;
}