/** * Helper function to compute group level given children groups * * @param childrenGroups a list of children group ids * @return group level */ private Integer getGroupLevel(List<String> childrenGroups) throws InvalidConfigException { // If no children exists, the group belongs to the base level. if (childrenGroups == null || childrenGroups.isEmpty()) { return DEFAULT_GROUP_LEVEL; } for (Map.Entry<Integer, Map<String, List<String>>> entry : _levelToGroupToSegmentsMap.entrySet()) { Integer currentLevel = entry.getKey(); Map<String, List<String>> currentLevelGroupToSegmentsMap = entry.getValue(); if (currentLevelGroupToSegmentsMap.keySet().containsAll(childrenGroups)) { return currentLevel + 1; } } // At this point, not all children groups are covered, cannot add group throw new InvalidConfigException("Cannot compute group level because not all children groups exist " + "in the segment merge lineage, table name: " + _tableNameWithType + ", children groups: " + childrenGroups + "table"); }
/** * Add segment merge lineage information * * @param groupId a group id * @param currentGroupSegments a list of segments that belongs to the group * @param childrenGroups a list of children groups that the current group covers. All children group ids has to be * from the same group level. */ public void addSegmentGroup(String groupId, List<String> currentGroupSegments, List<String> childrenGroups) throws InvalidConfigException { // Get group level Integer groupLevel = getGroupLevel(childrenGroups); Map<String, List<String>> groupToSegmentMap = _levelToGroupToSegmentsMap.computeIfAbsent(groupLevel, k -> new HashMap<>()); if (groupToSegmentMap.containsKey(groupId) || _parentGroupToChildrenGroupsMap.containsKey(groupId)) { throw new InvalidConfigException("Group id : " + groupId + " already exists for table " + _tableNameWithType); } // Update group to segments map groupToSegmentMap.put(groupId, new ArrayList<>(currentGroupSegments)); _levelToGroupToSegmentsMap.put(groupLevel, groupToSegmentMap); // Update segment group lineage map if (groupLevel > DEFAULT_GROUP_LEVEL) { _parentGroupToChildrenGroupsMap.put(groupId, new ArrayList<>(childrenGroups)); } LOGGER.info("New group has been added successfully to the segment lineage. (tableName: {}, groupId: {}, " + "currentGroupSegments: {}, childrenGroups: {}", _tableNameWithType, groupId, currentGroupSegments, childrenGroups); }
private void addFilePath(List<String> inputPaths, String path) throws Exception { File pathFile = new File(path); if (!pathFile.exists()) { throw new InvalidConfigException("Invalid input path: " + pathFile); } if (pathFile.isFile()) { // If the input is file, add to input path list inputPaths.add(pathFile.getAbsolutePath()); return; } if (pathFile.isDirectory()) { if (isPinotSegment(pathFile)) { // If the directory is pinot index dir, add to input path list inputPaths.add(pathFile.getAbsolutePath()); } else { // If the directory is not pinot index dir, recursively find the pinot segment file or directory File[] files = pathFile.listFiles(); assert files != null; for (File file : files) { addFilePath(inputPaths, file.getAbsolutePath()); } } } } }
/** * Rebalance the segments for replica group tables. * @param idealState old ideal state * @param tableConfig table config of table tor rebalance * @param rebalanceUserConfig custom user configs for specific rebalance strategies * @param newPartitionAssignment new rebalaned partition assignments as part of the resource rebalance * @return a rebalanced idealstate */ @Override public IdealState getRebalancedIdealState(IdealState idealState, TableConfig tableConfig, Configuration rebalanceUserConfig, PartitionAssignment newPartitionAssignment) throws InvalidConfigException { // Currently, only offline table is supported if (tableConfig.getTableType() == CommonConstants.Helix.TableType.REALTIME) { throw new InvalidConfigException("Realtime table is not supported by replica group rebalancer"); } ReplicaGroupPartitionAssignment newReplicaGroupPartitionAssignment = (ReplicaGroupPartitionAssignment) newPartitionAssignment; return rebalanceSegments(idealState, tableConfig, newReplicaGroupPartitionAssignment); }
/**
 * Assigns new segments to instances by referring to the partition assignment.
 * Only low-level consumer (LLC) segment names carry a partition id; other names are skipped.
 *
 * @param newSegments segments to assign
 * @param partitionAssignment partition assignment for the table to which the segments belong
 * @return map of segment name to instances list
 * @throws InvalidConfigException if a segment's partition has no instances in the assignment
 */
public Map<String, List<String>> assign(Collection<String> newSegments, PartitionAssignment partitionAssignment)
    throws InvalidConfigException {
  Map<String, List<String>> segmentAssignment = new HashMap<>(newSegments.size());
  for (String segmentName : newSegments) {
    if (!LLCSegmentName.isLowLevelConsumerSegmentName(segmentName)) {
      continue;
    }
    int partitionId = new LLCSegmentName(segmentName).getPartitionId();
    List<String> instances = partitionAssignment.getInstancesListForPartition(String.valueOf(partitionId));
    if (instances == null) {
      throw new InvalidConfigException(
          "No partition assignment " + partitionId + " found for segment " + segmentName);
    }
    segmentAssignment.put(segmentName, instances);
  }
  return segmentAssignment;
}
}
String message = String.format("Admin port is missing for host: %s", helixInstanceConfig.getHostName()); LOGGER.error(message); throw new InvalidConfigException(message);
/** * Generates stream partition assignment for given table, using tagged hosts and num partitions */ public PartitionAssignment generateStreamPartitionAssignment(TableConfig tableConfig, int numPartitions) throws InvalidConfigException { // TODO: add an override which can read from znode, instead of generating on the fly List<String> partitions = new ArrayList<>(numPartitions); for (int i = 0; i < numPartitions; i++) { partitions.add(String.valueOf(i)); } String tableNameWithType = tableConfig.getTableName(); int numReplicas = tableConfig.getValidationConfig().getReplicasPerPartitionNumber(); List<String> consumingTaggedInstances = getConsumingTaggedInstances(tableConfig); if (consumingTaggedInstances.size() < numReplicas) { throw new InvalidConfigException( "Not enough consuming instances tagged. Must be atleast equal to numReplicas:" + numReplicas); } /** * TODO: We will use only uniform assignment for now * This will be refactored as AssignmentStrategy interface and implementations UniformAssignment, BalancedAssignment etc * {@link StreamPartitionAssignmentGenerator} and AssignmentStrategy interface will together replace * StreamPartitionAssignmentGenerator and StreamPartitionAssignmentStrategy */ return uniformAssignment(tableNameWithType, partitions, numReplicas, consumingTaggedInstances); }
/**
 * Rebuilds the broker resource for the given table based on its Helix broker tenant tag.
 *
 * @param tableNameWithType table name with type suffix
 * @return response of the broker resource rebuild
 * @throws InvalidTableConfigException if the table config cannot be fetched
 * @throws InvalidConfigException if the table does not exist
 */
public PinotResourceManagerResponse rebuildBrokerResourceFromHelixTags(String tableNameWithType) throws Exception {
  TableConfig tableConfig;
  try {
    tableConfig = ZKMetadataProvider.getTableConfig(_propertyStore, tableNameWithType);
  } catch (Exception e) {
    LOGGER.warn("Caught exception while getting table config for table {}", tableNameWithType, e);
    // NOTE(review): the cause is logged above but not attached to the thrown exception; attach it via a
    // (message, cause) constructor if InvalidTableConfigException supports one.
    throw new InvalidTableConfigException(
        "Failed to fetch broker tag for table " + tableNameWithType + " due to exception: " + e.getMessage());
  }
  if (tableConfig == null) {
    // Fixed: use SLF4J parameterized logging, consistent with the warn() call in the catch block above
    // (previously this was string concatenation).
    LOGGER.warn("Table {} does not exist", tableNameWithType);
    throw new InvalidConfigException(
        "Invalid table configuration for table " + tableNameWithType + ". Table does not exist");
  }
  return rebuildBrokerResource(tableNameWithType,
      getAllInstancesForBrokerTenant(tableConfig.getTenantConfig().getBroker()));
}
throw new InvalidConfigException("Realtime table is not supported by replica group rebalancer");
public List<File> convert() throws Exception { // Convert the input segments based on merge type List<File> convertedSegments; switch (_mergeType) { case CONCATENATE: convertedSegments = concatenateSegments(); break; case ROLLUP: // Fetch schema from segment metadata Schema schema = new SegmentMetadataImpl(_inputIndexDirs.get(0)).getSchema(); convertedSegments = rollupSegments(schema); break; default: throw new InvalidConfigException("Invalid merge type : " + _mergeType); } return convertedSegments; }
throw new InvalidConfigException("This table is not using replica group segment assignment"); throw new InvalidConfigException("Partition level replica group rebalance is not supported"); throw new InvalidConfigException( "Invalid input config (numReplicaGroup: " + targetNumReplicaGroup + ", " + "numInstancesPerPartition: " + targetNumInstancesPerPartition + ", numServers: " + serverInstances.size() + ")"); partitionAssignmentGenerator.getReplicaGroupPartitionAssignment(tableNameWithType); if (oldReplicaGroupPartitionAssignment == null) { throw new InvalidConfigException("Replica group partition assignment does not exist for " + tableNameWithType); + ", numRemovedServers: " + removedServers.size() + " )"; LOGGER.info(errorMessage); throw new InvalidConfigException(errorMessage);
throw new InvalidConfigException("Currently, only CONCATENATE merge type is supported");