private void migrateOffsets() { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener"); KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) { Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer); if (!kafkaOffsets.isEmpty()) { logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr); logger.debug("Offsets found: {}", kafkaOffsets); return; } logger.info("No Kafka offsets found. Migrating zookeeper offsets"); Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets = getZookeeperOffsets(zkClient, consumer); if (zookeeperOffsets.isEmpty()) { logger.warn("No offsets to migrate found in Zookeeper"); return; } logger.info("Committing Zookeeper offsets to Kafka"); logger.debug("Offsets to commit: {}", zookeeperOffsets); consumer.commitSync(zookeeperOffsets); // Read the offsets to verify they were committed Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer); logger.debug("Offsets committed: {}", newKafkaOffsets); if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) { throw new FlumeException("Offsets could not be committed"); } } }
// Topic-scoped variant of the offset migration: takes the topic name explicitly instead
// of relying on surrounding state. Only the ZooKeeper client acquisition is visible here —
// NOTE(review): the method body continues beyond this view (truncated fragment), so the
// rest of the migration logic cannot be verified from this chunk.
private void migrateOffsets(String topicStr) { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener");
/**
 * Generates the Kafka bootstrap connection string from the broker metadata stored in
 * ZooKeeper. Allows for backwards compatibility of the zookeeperConnect configuration.
 *
 * @param zookeeperConnect ZooKeeper connection string to look brokers up from
 * @param securityProtocol protocol whose listener endpoints should be returned
 * @return comma-separated host:port list suitable for bootstrap.servers
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
           JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
           10, Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    ListenerName listener = ListenerName.forSecurityProtocol(securityProtocol);
    List<Broker> brokers =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    // Resolve each broker's endpoint for the requested protocol and join into one string.
    return brokers.stream()
        .map(broker -> broker.brokerEndPoint(listener))
        .map(BrokerEndPoint::connectionString)
        .collect(Collectors.joining(","));
  }
}
// Opens a ZooKeeper client (unbounded retries: Integer.MAX_VALUE) presumably to trigger
// partition reassignment / leader election for the monitored topic.
// NOTE(review): the method body continues beyond this view (truncated fragment); the
// actual reassignment logic and the client's cleanup cannot be verified from this chunk.
void maybeReassignPartitionAndElectLeader() throws Exception { KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
/**
 * Ensures the monitored topic has at least {@code minPartitionNum} partitions, adding
 * partitions through ZooKeeper when the current count is lower. Existing partitions are
 * never removed or reassigned by this method.
 *
 * Improvement over the original: the KafkaZkClient is managed with try-with-resources
 * instead of a manual try/finally close, matching the idiom used elsewhere in this file.
 *
 * @param minPartitionNum desired minimum number of partitions for the topic
 */
void maybeAddPartitions(int minPartitionNum) {
  // Integer.MAX_VALUE retries: keep trying to reach ZooKeeper indefinitely.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect,
           JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS,
           Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener")) {
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment =
        getPartitionAssignment(zkClient, _topic);
    int partitionNum = existingAssignment.size();
    if (partitionNum < minPartitionNum) {
      LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} " +
          "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
      // scala.Option.apply(null) == None: let Kafka pick the replica assignment and brokers.
      scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>>
          replicaAssignment = scala.Option.apply(null);
      scala.Option<Seq<Object>> brokerList = scala.Option.apply(null);
      adminZkClient.addPartitions(_topic, existingAssignment,
          adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList),
          minPartitionNum, replicaAssignment, false);
    }
  }
}
private void migrateOffsets() { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener"); KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) { Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer); if (!kafkaOffsets.isEmpty()) { logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr); logger.debug("Offsets found: {}", kafkaOffsets); return; } logger.info("No Kafka offsets found. Migrating zookeeper offsets"); Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets = getZookeeperOffsets(zkClient, consumer); if (zookeeperOffsets.isEmpty()) { logger.warn("No offsets to migrate found in Zookeeper"); return; } logger.info("Committing Zookeeper offsets to Kafka"); logger.debug("Offsets to commit: {}", zookeeperOffsets); consumer.commitSync(zookeeperOffsets); // Read the offsets to verify they were committed Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer); logger.debug("Offsets committed: {}", newKafkaOffsets); if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) { throw new FlumeException("Offsets could not be committed"); } } }
// Overload taking the topic name as a parameter. Only the try-with-resources header that
// acquires the ZooKeeper client is visible here.
// NOTE(review): truncated fragment — the remainder of the method lies outside this view,
// so its behavior is not documented further to avoid unverifiable claims.
private void migrateOffsets(String topicStr) { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener");
/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 *
 * @param zookeeperConnect ZooKeeper quorum to query for live brokers
 * @param securityProtocol selects which listener endpoint each broker exposes
 * @return comma-separated connection strings of all brokers in the cluster
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
           JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
           10, Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    List<Broker> brokerList =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    List<String> connections = new ArrayList<>(brokerList.size());
    for (Broker broker : brokerList) {
      // Pick the endpoint matching the requested security protocol for each broker.
      connections.add(broker.brokerEndPoint(listenerName).connectionString());
    }
    return String.join(",", connections);
  }
}
public static void createTopic(String kafkaTopic, String zkStr) { // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin try { String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1", "--partitions", "1", "--topic", kafkaTopic}; KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE, Time.SYSTEM,"kafka.server", "SessionExpireListener"); TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args); TopicCommand.createTopic(zkClient, opts); } catch (TopicExistsException e) { // Catch TopicExistsException otherwise it will break maven-surefire-plugin System.out.println("Topic already existed"); } } }
public static void createTopic(String kafkaTopic, String zkStr) { // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin try { String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1", "--partitions", "1", "--topic", kafkaTopic}; KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE, Time.SYSTEM,"kafka.server", "SessionExpireListener"); TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args); TopicCommand.createTopic(zkClient, opts); } catch (TopicExistsException e) { // Catch TopicExistsException otherwise it will break maven-surefire-plugin System.out.println("Topic already existed"); } } }
public static void createTopic(String kafkaTopic, String zkStr) { // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin try { String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1", "--partitions", "1", "--topic", kafkaTopic}; KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE, Time.SYSTEM,"kafka.server", "SessionExpireListener"); TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args); TopicCommand.createTopic(zkClient, opts); } catch (TopicExistsException e) { // Catch TopicExistsException otherwise it will break maven-surefire-plugin System.out.println("Topic already existed"); } } }