// NOTE(review): this method appears garbled/truncated by extraction and is NOT compilable as-is:
//  - the fragment "_replicationFactor, currentReplicationFactor, _topic, _zkConnect);" has lost its
//    enclosing statement (presumably a LOG call and the computation of expectedReplicationFactor);
//  - "!zkClient.reassignPartitionsInProgress()) {" dangles without its enclosing "if (" condition
//    (presumably a partition-count / partitionInfoList check — TODO confirm against version control);
//  - the for-loop over currentProperties.keySet() has no body populating expectedProperties;
//  - braces are unbalanced and the final try/finally around zkClient.close() is missing.
// Visible intent: open a KafkaZkClient; if the expected replication factor exceeds the current one
// and no reassignment is in flight, overwrite the topic's entity configs and raise the replication
// factor; separately trigger reassignPartitions(...) for the topic; close the client at the end.
// Recover the original from source control before editing this block.
void maybeReassignPartitionAndElectLeader() throws Exception { KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener"); Collection<Broker> brokers = scala.collection.JavaConversions.asJavaCollection(zkClient.getAllBrokersInCluster()); _replicationFactor, currentReplicationFactor, _topic, _zkConnect); if (expectedReplicationFactor > currentReplicationFactor && !zkClient.reassignPartitionsInProgress()) { LOG.info("MultiClusterTopicManagementService will increase the replication factor of the topic {} in cluster {}" + "from {} to {}", _topic, _zkConnect, currentReplicationFactor, expectedReplicationFactor); Properties currentProperties = zkClient.getEntityConfigs(ConfigType.Topic(), _topic); Properties expectedProperties = new Properties(); for (Object key: currentProperties.keySet()) LOG.info("MultiClusterTopicManagementService will overwrite properties of the topic {} " + "in cluster {} from {} to {}.", _topic, _zkConnect, currentProperties, expectedProperties); zkClient.setOrCreateEntityConfigs(ConfigType.Topic(), _topic, expectedProperties); !zkClient.reassignPartitionsInProgress()) { LOG.info("MultiClusterTopicManagementService will reassign partitions of the topic {} in cluster {}", _topic, _zkConnect); reassignPartitions(zkClient, brokers, _topic, partitionInfoList.size(), expectedReplicationFactor); zkClient.close();
/**
 * Collects the consumer group's committed offsets from ZooKeeper for every partition
 * of the topic named by the {@code topicStr} field.
 *
 * <p>Partitions with no offset stored in ZooKeeper are simply omitted from the result.
 *
 * @param zkClient open ZooKeeper client used to read the stored offsets
 * @param consumer consumer used only to enumerate the topic's partitions
 * @return map of partition to its ZooKeeper-recorded offset; empty if none were found
 */
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
    KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
  Map<TopicPartition, OffsetAndMetadata> zkOffsets = new HashMap<>();
  for (PartitionInfo partitionInfo : consumer.partitionsFor(topicStr)) {
    TopicPartition tp = new TopicPartition(topicStr, partitionInfo.partition());
    Option<Object> storedOffset = zkClient.getConsumerOffset(groupId, tp);
    if (storedOffset.isDefined()) {
      // Offsets come back as a boxed Long wrapped in a Scala Option.
      zkOffsets.put(tp, new OffsetAndMetadata((Long) storedOffset.get()));
    }
  }
  return zkOffsets;
}
/**
 * Builds a {@link PartitionInfo} view of the given topic's partition assignment as
 * recorded in ZooKeeper.
 *
 * <p>Host and port of brokers are not resolved here, so every {@link Node} is built
 * with a placeholder host ({@code ""}) and port ({@code -1}); the in-sync-replica and
 * offline-replica arrays are left {@code null}. A partition without a leader yields a
 * {@code null} leader node.
 *
 * @param zkClient open ZooKeeper client
 * @param topic    topic whose assignment to read
 * @return one {@link PartitionInfo} per partition of the topic
 */
private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
  scala.collection.immutable.Set<String> singleTopic =
      new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> assignment =
      zkClient.getPartitionAssignmentForTopics(singleTopic).apply(topic);
  List<PartitionInfo> result = new ArrayList<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> entries =
      assignment.iterator();
  while (entries.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> entry = entries.next();
    Integer partitionId = (Integer) entry._1();
    scala.Option<Object> leaderId =
        zkClient.getLeaderForPartition(new TopicPartition(topic, partitionId));
    Node leaderNode = leaderId.isEmpty() ? null : new Node((Integer) leaderId.get(), "", -1);
    scala.collection.Seq<Object> replicaIds = entry._2();
    Node[] replicaNodes = new Node[replicaIds.size()];
    for (int idx = 0; idx < replicaNodes.length; idx++) {
      replicaNodes[idx] = new Node((Integer) replicaIds.apply(idx), "", -1);
    }
    result.add(new PartitionInfo(topic, partitionId, leaderNode, replicaNodes, null));
  }
  return result;
}
/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 *
 * @param zookeeperConnect ZooKeeper connection string holding the broker registrations
 * @param securityProtocol protocol whose listener endpoints should be collected
 * @return comma-separated {@code host:port} list of all live brokers
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    List<Broker> brokerList =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    // Join endpoints with the JDK's Collectors.joining instead of building two intermediate
    // lists and delegating to third-party StringUtils.join.
    return brokerList.stream()
        .map(broker -> broker.brokerEndPoint(ListenerName.forSecurityProtocol(securityProtocol)))
        .map(BrokerEndPoint::connectionString)
        .collect(Collectors.joining(","));
  }
}
/**
 * Ensures the monitored topic has at least {@code minPartitionNum} partitions, adding
 * partitions through the ZooKeeper admin API when it currently has fewer. No-op when the
 * topic already meets the minimum.
 *
 * @param minPartitionNum minimum number of partitions the topic should have
 */
void maybeAddPartitions(int minPartitionNum) {
  // try-with-resources: the original created zkClient before the try block, which leaked
  // the ZooKeeper session if the AdminZkClient constructor or the assignment lookup threw.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(),
      ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM,
      METRIC_GROUP_NAME, "SessionExpireListener")) {
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment =
        getPartitionAssignment(zkClient, _topic);
    int partitionNum = existingAssignment.size();
    if (partitionNum < minPartitionNum) {
      LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
          + "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
      // scala.Option.apply(null) == None: let Kafka choose both the replica assignment
      // and the broker list for the new partitions.
      scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> replicaAssignment =
          scala.Option.apply(null);
      scala.Option<Seq<Object>> brokerList = scala.Option.apply(null);
      adminZkClient.addPartitions(_topic, existingAssignment,
          adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList),
          minPartitionNum, replicaAssignment, false);
    }
  }
}
private void migrateOffsets() { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener"); KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) { Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer); if (!kafkaOffsets.isEmpty()) { logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr); logger.debug("Offsets found: {}", kafkaOffsets); return; } logger.info("No Kafka offsets found. Migrating zookeeper offsets"); Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets = getZookeeperOffsets(zkClient, consumer); if (zookeeperOffsets.isEmpty()) { logger.warn("No offsets to migrate found in Zookeeper"); return; } logger.info("Committing Zookeeper offsets to Kafka"); logger.debug("Offsets to commit: {}", zookeeperOffsets); consumer.commitSync(zookeeperOffsets); // Read the offsets to verify they were committed Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer); logger.debug("Offsets committed: {}", newKafkaOffsets); if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) { throw new FlumeException("Offsets could not be committed"); } } }
/**
 * Decides whether these kafka topics represent a migration onto a new topic: a secondary
 * topic is configured while the primary topic does not yet exist in ZooKeeper.
 *
 * <p>The secondary check is performed first so that no ZooKeeper lookup happens when no
 * secondary topic is configured (same short-circuit as the original {@code &&} chain).
 */
private boolean isMigrationToNewKafkaTopic(KafkaTopics kafkaTopics) {
  if (!kafkaTopics.getSecondary().isPresent()) {
    return false;
  }
  String primaryName = kafkaTopics.getPrimary().name().asString();
  return !kafkaZkClient.topicExists(primaryName);
}
/**
 * Wraps an existing ZooKeeper connection in a {@link KafkaZkClient}.
 *
 * @param zooKeeperClient already-configured low-level ZooKeeper client
 * @return Kafka-level ZooKeeper client using wall-clock time, with secure ACLs disabled
 */
private KafkaZkClient kafkaZkClient(ZooKeeperClient zooKeeperClient) {
  final boolean isSecure = false; // no ZK ACLs — matches the cluster's plaintext setup
  return new KafkaZkClient(zooKeeperClient, isSecure, Time.SYSTEM);
}
/**
 * Checks whether the given kafka topic is currently marked for deletion in the named
 * cluster's ZooKeeper.
 *
 * @param kafkaClusterName key into {@code zkClients} selecting the cluster
 * @param kafkaTopic       topic to check
 */
private boolean isMarkedForDeletion(String kafkaClusterName, KafkaTopic kafkaTopic) {
  KafkaZkClient clusterClient = zkClients.get(kafkaClusterName);
  String topicName = kafkaTopic.name().asString();
  return clusterClient.isTopicMarkedForDeletion(topicName);
}
}
/**
 * Looks up the current leader broker id for the given topic-partition via ZooKeeper.
 *
 * <p>Any failure — including a missing leader, where the empty Scala {@code Option}'s
 * {@code get()} throws — is deliberately translated into a
 * {@link BrokerNotFoundForPartitionException} carrying the original cause.
 *
 * @return broker id of the partition's leader
 * @throws BrokerNotFoundForPartitionException when no leader could be resolved
 */
@Override
@SuppressWarnings("unchecked")
public int readLeaderForPartition(TopicAndPartition topicAndPartition) {
  try {
    TopicPartition partition =
        new TopicPartition(topicAndPartition.topic(), topicAndPartition.partition());
    // getLeaderForPartition returns Option<Object>; get() unboxes to int (or throws if empty).
    return (int) kafkaZkClient.getLeaderForPartition(partition).get();
  } catch (Exception cause) {
    throw new BrokerNotFoundForPartitionException(
        topicAndPartition.topic(), topicAndPartition.partition(), cause);
  }
}
// NOTE(review): this definition is truncated by extraction — the try-with-resources header opens
// a KafkaZkClient but the line ends mid-resource-list (after "SessionExpireListener");) with no
// further resources, body, or closing braces. Presumably the full method mirrors the complete
// migrateOffsets() elsewhere in this file but parameterized by topicStr — TODO confirm against
// version control before editing.
private void migrateOffsets(String topicStr) { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener");
/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 *
 * @param zookeeperConnect ZooKeeper connection string holding the broker registrations
 * @param securityProtocol protocol whose listener endpoints should be collected
 * @return comma-separated {@code host:port} list of all live brokers
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    List<Broker> brokerList =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    // Join endpoints with the JDK's Collectors.joining instead of building two intermediate
    // lists and delegating to third-party StringUtils.join.
    return brokerList.stream()
        .map(broker -> broker.brokerEndPoint(ListenerName.forSecurityProtocol(securityProtocol)))
        .map(BrokerEndPoint::connectionString)
        .collect(Collectors.joining(","));
  }
}
/**
 * Reports whether the given logical topic exists: a logical topic maps onto one or more
 * physical kafka topics, and it is considered present only when every mapped kafka topic
 * exists in ZooKeeper.
 */
@Override
public boolean topicExists(Topic topic) {
  return kafkaNamesMapper.toKafkaTopics(topic).allMatch(kafkaTopic -> {
    String physicalName = kafkaTopic.name().asString();
    return kafkaZkClient.topicExists(physicalName);
  });
}
private BrokerOperations(Map<String, String> kafkaZkConnection, int sessionTimeout, int connectionTimeout, int maxInflightRequests, String namespace) { zkClients = kafkaZkConnection.entrySet().stream() .collect(toMap(Map.Entry::getKey, e -> { ZooKeeperClient zooKeeperClient = new ZooKeeperClient( e.getValue(), connectionTimeout, sessionTimeout, maxInflightRequests, Time.SYSTEM, ZOOKEEPER_METRIC_GROUP, ZOOKEEPER_METRIC_TYPE); return new KafkaZkClient(zooKeeperClient, false, Time.SYSTEM); })); kafkaNamesMapper = new JsonToAvroMigrationKafkaNamesMapper(namespace); }
/**
 * Checks the named cluster's ZooKeeper to see whether the given kafka topic is
 * pending deletion.
 *
 * @param kafkaClusterName key into {@code zkClients} selecting the cluster
 * @param kafkaTopic       topic to check
 */
private boolean isMarkedForDeletion(String kafkaClusterName, KafkaTopic kafkaTopic) {
  String physicalName = kafkaTopic.name().asString();
  return zkClients.get(kafkaClusterName).isTopicMarkedForDeletion(physicalName);
}
}
private void migrateOffsets() { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener"); KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) { Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer); if (!kafkaOffsets.isEmpty()) { logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr); logger.debug("Offsets found: {}", kafkaOffsets); return; } logger.info("No Kafka offsets found. Migrating zookeeper offsets"); Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets = getZookeeperOffsets(zkClient, consumer); if (zookeeperOffsets.isEmpty()) { logger.warn("No offsets to migrate found in Zookeeper"); return; } logger.info("Committing Zookeeper offsets to Kafka"); logger.debug("Offsets to commit: {}", zookeeperOffsets); consumer.commitSync(zookeeperOffsets); // Read the offsets to verify they were committed Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer); logger.debug("Offsets committed: {}", newKafkaOffsets); if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) { throw new FlumeException("Offsets could not be committed"); } } }
/**
 * Reports whether the named logical topic fully exists in the named cluster: every
 * physical kafka topic it maps to must be present in ZooKeeper and must not be marked
 * for deletion.
 *
 * @param topicName        logical topic name
 * @param kafkaClusterName key into {@code zkClients} selecting the cluster
 */
public boolean topicExists(String topicName, String kafkaClusterName) {
  Topic topic = topic(topicName).build();
  KafkaZkClient clusterClient = zkClients.get(kafkaClusterName);
  return kafkaNamesMapper.toKafkaTopics(topic).allMatch(kafkaTopic -> {
    String physicalName = kafkaTopic.name().asString();
    return clusterClient.topicExists(physicalName)
        && !isMarkedForDeletion(kafkaClusterName, kafkaTopic);
  });
}
/**
 * Wires up one ZooKeeper-backed {@link KafkaZkClient} for each configured cluster and
 * creates the kafka names mapper for the given namespace.
 *
 * @param kafkaZkConnection    cluster name -> ZooKeeper connection string
 * @param sessionTimeout       ZooKeeper session timeout (ms)
 * @param connectionTimeout    ZooKeeper connection timeout (ms)
 * @param maxInflightRequests  cap on outstanding ZooKeeper requests per client
 * @param namespace            namespace passed to the kafka names mapper
 */
private BrokerOperations(Map<String, String> kafkaZkConnection, int sessionTimeout,
                         int connectionTimeout, int maxInflightRequests, String namespace) {
  zkClients = kafkaZkConnection.entrySet().stream().collect(toMap(
      Map.Entry::getKey,
      clusterEntry -> {
        ZooKeeperClient zooKeeper = new ZooKeeperClient(clusterEntry.getValue(),
            connectionTimeout, sessionTimeout, maxInflightRequests, Time.SYSTEM,
            ZOOKEEPER_METRIC_GROUP, ZOOKEEPER_METRIC_TYPE);
        // false: secure ZK ACLs disabled
        return new KafkaZkClient(zooKeeper, false, Time.SYSTEM);
      }));
  kafkaNamesMapper = new JsonToAvroMigrationKafkaNamesMapper(namespace);
}
/**
 * Reads the consumer group's committed offsets for each partition of {@code topicStr}
 * out of ZooKeeper.
 *
 * <p>Partitions without a stored offset are left out of the returned map.
 *
 * @param zkClient open ZooKeeper client used to read the stored offsets
 * @param consumer consumer used only to enumerate the topic's partitions
 * @param topicStr topic whose offsets to collect
 * @return map of partition to its ZooKeeper-recorded offset; empty if none were found
 */
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
    KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer, String topicStr) {
  Map<TopicPartition, OffsetAndMetadata> zkOffsets = new HashMap<>();
  for (PartitionInfo partitionInfo : consumer.partitionsFor(topicStr)) {
    TopicPartition tp = new TopicPartition(topicStr, partitionInfo.partition());
    Option<Object> storedOffset = zkClient.getConsumerOffset(groupId, tp);
    if (storedOffset.isDefined()) {
      // ZooKeeper hands back a boxed Long inside a Scala Option.
      zkOffsets.put(tp, new OffsetAndMetadata((Long) storedOffset.get()));
    }
  }
  return zkOffsets;
}
}
// NOTE(review): truncated definition — the try-with-resources header opens a KafkaZkClient but the
// line ends mid-resource-list (after "SessionExpireListener");) with no body or closing braces.
// Presumably the complete method matches the full no-arg migrateOffsets() in this file but takes
// the topic name as a parameter — TODO recover from version control before editing.
private void migrateOffsets(String topicStr) { try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10, Time.SYSTEM, "kafka.server", "SessionExpireListener");