/**
 * Constructor for exception block.
 */
public IntermediateResultsBlock(@Nonnull ProcessingException processingException, @Nonnull Exception e) {
  _processingExceptions = new ArrayList<>();
  _processingExceptions.add(QueryException.getException(processingException, e));
}
/**
 * Helper function to compute the group level given the children groups.
 *
 * @param childrenGroups a list of children group ids
 * @return group level
 */
private Integer getGroupLevel(List<String> childrenGroups) throws InvalidConfigException {
  // If no children exist, the group belongs to the base level.
  if (childrenGroups == null || childrenGroups.isEmpty()) {
    return DEFAULT_GROUP_LEVEL;
  }
  for (Map.Entry<Integer, Map<String, List<String>>> entry : _levelToGroupToSegmentsMap.entrySet()) {
    Integer currentLevel = entry.getKey();
    Map<String, List<String>> currentLevelGroupToSegmentsMap = entry.getValue();
    if (currentLevelGroupToSegmentsMap.keySet().containsAll(childrenGroups)) {
      return currentLevel + 1;
    }
  }
  // At this point, not all children groups are covered; cannot add the group
  throw new InvalidConfigException(
      "Cannot compute group level because not all children groups exist in the segment merge lineage, table name: "
          + _tableNameWithType + ", children groups: " + childrenGroups);
}
@Override
public void fetchSegmentToLocal(final String uri, final File tempFile) throws Exception {
  RetryPolicies.exponentialBackoffRetryPolicy(_retryCount, _retryWaitMs, 5).attempt(() -> {
    try {
      int statusCode = _httpClient.downloadFile(new URI(uri), tempFile);
      _logger.info("Downloaded file from: {} to: {}; Length of downloaded file: {}; Response status code: {}", uri,
          tempFile, tempFile.length(), statusCode);
      return true;
    } catch (HttpErrorStatusException e) {
      int statusCode = e.getStatusCode();
      if (statusCode >= 500) {
        // Temporary exception
        _logger.warn("Caught temporary exception while downloading file from: {}, will retry", uri, e);
        return false;
      } else {
        // Permanent exception
        _logger.error("Caught permanent exception while downloading file from: {}, won't retry", uri, e);
        throw e;
      }
    } catch (Exception e) {
      _logger.warn("Caught temporary exception while downloading file from: {}, will retry", uri, e);
      return false;
    }
  });
}
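// Hedged usage sketch (not part of the source above) of the retry contract that
// fetchSegmentToLocal relies on: the Callable returns true on success, returns
// false to schedule another attempt after an exponentially growing delay, and
// throws to abort all remaining attempts. The attempt count, delay, and the
// random transient failure below are illustrative assumptions; the import path
// varies by Pinot version.
import org.apache.pinot.spi.utils.retry.RetryPolicies; // assumed import path

public class RetryPolicySketch {
  public static void main(String[] args) throws Exception {
    RetryPolicies.exponentialBackoffRetryPolicy(3 /* maxNumAttempts */, 1000L /* initialDelayMs */,
        5 /* delayScaleFactor */).attempt(() -> {
      boolean transientFailure = Math.random() < 0.5; // stand-in for a 5xx response
      if (transientFailure) {
        return false; // retry after backoff
      }
      return true; // success, stop retrying
    });
  }
}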
private SimpleHttpResponse sendRequest(HttpUriRequest request) throws IOException, HttpErrorStatusException {
  try (CloseableHttpResponse response = _httpClient.execute(request)) {
    String controllerHost = null;
    String controllerVersion = null;
    if (response.containsHeader(CommonConstants.Controller.HOST_HTTP_HEADER)) {
      controllerHost = response.getFirstHeader(CommonConstants.Controller.HOST_HTTP_HEADER).getValue();
      controllerVersion = response.getFirstHeader(CommonConstants.Controller.VERSION_HTTP_HEADER).getValue();
    }
    if (controllerHost != null) {
      LOGGER.info(String.format("Sending request: %s to controller: %s, version: %s", request.getURI(), controllerHost,
          controllerVersion));
    }
    int statusCode = response.getStatusLine().getStatusCode();
    if (statusCode >= 300) {
      throw new HttpErrorStatusException(getErrorMessage(request, response), statusCode);
    }
    return new SimpleHttpResponse(statusCode, EntityUtils.toString(response.getEntity()));
  }
}
@Nonnull
public RebalanceResult rebalanceTable(final String rawTableName, TableType tableType,
    Configuration rebalanceUserConfig) throws InvalidConfigException, TableNotFoundException {
  TableConfig tableConfig = getTableConfig(rawTableName, tableType);
  if (tableConfig == null) {
    throw new TableNotFoundException("Table " + rawTableName + " of type " + tableType.toString() + " not found");
  }
  String tableNameWithType = tableConfig.getTableName();

  RebalanceResult result;
  try {
    RebalanceSegmentStrategy rebalanceSegmentsStrategy =
        RebalanceSegmentStrategyFactory.getInstance().getRebalanceSegmentsStrategy(tableConfig);
    result = _tableRebalancer.rebalance(tableConfig, rebalanceSegmentsStrategy, rebalanceUserConfig);
  } catch (InvalidConfigException e) {
    LOGGER.error("Exception in rebalancing config for table {}", tableNameWithType, e);
    throw e;
  }
  return result;
}
.getMessage(), Response.Status.INTERNAL_SERVER_ERROR);
private void addFilePath(List<String> inputPaths, String path) throws Exception {
  File pathFile = new File(path);
  if (!pathFile.exists()) {
    throw new InvalidConfigException("Invalid input path: " + pathFile);
  }
  if (pathFile.isFile()) {
    // If the input is a file, add it to the input path list
    inputPaths.add(pathFile.getAbsolutePath());
    return;
  }
  if (pathFile.isDirectory()) {
    if (isPinotSegment(pathFile)) {
      // If the directory is a Pinot index dir, add it to the input path list
      inputPaths.add(pathFile.getAbsolutePath());
    } else {
      // Otherwise, recursively look for Pinot segment files or directories
      File[] files = pathFile.listFiles();
      assert files != null;
      for (File file : files) {
        addFilePath(inputPaths, file.getAbsolutePath());
      }
    }
  }
}
@GET @Path("pql") public String get(@QueryParam("pql") String pqlQuery, @QueryParam("trace") String traceEnabled, @Context HttpHeaders httpHeaders) { try { LOGGER.debug("Trace: {}, Running query: {}", traceEnabled, pqlQuery); return getQueryResponse(pqlQuery, traceEnabled, httpHeaders); } catch (Exception e) { LOGGER.error("Caught exception while processing get request", e); return QueryException.getException(QueryException.INTERNAL_ERROR, e).toString(); } }
/**
 * Rebalances the segments for replica group tables.
 *
 * @param idealState old ideal state
 * @param tableConfig table config of the table to rebalance
 * @param rebalanceUserConfig custom user configs for specific rebalance strategies
 * @param newPartitionAssignment new rebalanced partition assignment as part of the resource rebalance
 * @return a rebalanced ideal state
 */
@Override
public IdealState getRebalancedIdealState(IdealState idealState, TableConfig tableConfig,
    Configuration rebalanceUserConfig, PartitionAssignment newPartitionAssignment) throws InvalidConfigException {
  // Currently, only offline tables are supported
  if (tableConfig.getTableType() == CommonConstants.Helix.TableType.REALTIME) {
    throw new InvalidConfigException("Realtime table is not supported by replica group rebalancer");
  }
  ReplicaGroupPartitionAssignment newReplicaGroupPartitionAssignment =
      (ReplicaGroupPartitionAssignment) newPartitionAssignment;
  return rebalanceSegments(idealState, tableConfig, newReplicaGroupPartitionAssignment);
}
LOGGER.error("Caught exception while merging two blocks (step 2).", e); mergedBlock .addToProcessingExceptions(QueryException.getException(QueryException.MERGE_RESPONSE_ERROR, e));
/**
 * Adds segment merge lineage information.
 *
 * @param groupId a group id
 * @param currentGroupSegments a list of segments that belong to the group
 * @param childrenGroups a list of children groups that the current group covers. All children group ids have to be
 *                       from the same group level.
 */
public void addSegmentGroup(String groupId, List<String> currentGroupSegments, List<String> childrenGroups)
    throws InvalidConfigException {
  // Get the group level
  Integer groupLevel = getGroupLevel(childrenGroups);
  Map<String, List<String>> groupToSegmentMap =
      _levelToGroupToSegmentsMap.computeIfAbsent(groupLevel, k -> new HashMap<>());
  if (groupToSegmentMap.containsKey(groupId) || _parentGroupToChildrenGroupsMap.containsKey(groupId)) {
    throw new InvalidConfigException("Group id : " + groupId + " already exists for table " + _tableNameWithType);
  }

  // Update the group to segments map
  groupToSegmentMap.put(groupId, new ArrayList<>(currentGroupSegments));
  _levelToGroupToSegmentsMap.put(groupLevel, groupToSegmentMap);

  // Update the segment group lineage map
  if (groupLevel > DEFAULT_GROUP_LEVEL) {
    _parentGroupToChildrenGroupsMap.put(groupId, new ArrayList<>(childrenGroups));
  }
  LOGGER.info("New group has been added successfully to the segment lineage. (tableName: {}, groupId: {}, "
      + "currentGroupSegments: {}, childrenGroups: {})", _tableNameWithType, groupId, currentGroupSegments,
      childrenGroups);
}
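// Hedged usage sketch for the two lineage methods above. "lineage" is a
// hypothetical instance of the enclosing class and all segment/group names are
// made up; assumes java.util.Arrays and java.util.Collections are imported.
// Groups without children land on the base level, and a group whose children
// all exist one level down lands one level higher.
lineage.addSegmentGroup("group_0", Arrays.asList("segment_0", "segment_1"), null);  // base level
lineage.addSegmentGroup("group_1", Arrays.asList("segment_2", "segment_3"), null);  // base level
lineage.addSegmentGroup("group_2", Collections.singletonList("merged_segment_0"),
    Arrays.asList("group_0", "group_1"));                                           // base level + 1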
@POST @Path("pql") public String post(String requestJsonStr, @Context HttpHeaders httpHeaders) { try { JsonNode requestJson = JsonUtils.stringToJsonNode(requestJsonStr); String pqlQuery = requestJson.get("pql").asText(); String traceEnabled = "false"; if (requestJson.has("trace")) { traceEnabled = requestJson.get("trace").toString(); } LOGGER.debug("Trace: {}, Running query: {}", traceEnabled, pqlQuery); return getQueryResponse(pqlQuery, traceEnabled, httpHeaders); } catch (Exception e) { LOGGER.error("Caught exception while processing post request", e); return QueryException.getException(QueryException.INTERNAL_ERROR, e).toString(); } }
public List<File> convert() throws Exception {
  // Convert the input segments based on the merge type
  List<File> convertedSegments;
  switch (_mergeType) {
    case CONCATENATE:
      convertedSegments = concatenateSegments();
      break;
    case ROLLUP:
      // Fetch the schema from the segment metadata
      Schema schema = new SegmentMetadataImpl(_inputIndexDirs.get(0)).getSchema();
      convertedSegments = rollupSegments(schema);
      break;
    default:
      throw new InvalidConfigException("Invalid merge type : " + _mergeType);
  }
  return convertedSegments;
}
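// Hedged, self-contained illustration of the two merge semantics (not the
// converter's implementation, whose aggregation behavior may differ):
// CONCATENATE keeps every input row, while ROLLUP aggregates the metric values
// of rows sharing the same dimension values. Rows are simplified here to
// (dimension, metric) pairs.
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MergeTypeSketch {
  public static void main(String[] args) {
    List<String[]> rows = Arrays.asList(new String[]{"a", "1"}, new String[]{"a", "2"}, new String[]{"b", "3"});
    // CONCATENATE: output rows == input rows
    System.out.println("CONCATENATE: " + rows.size() + " rows");
    // ROLLUP: sum the metric for rows with an identical dimension value
    Map<String, Long> rolledUp = new LinkedHashMap<>();
    for (String[] row : rows) {
      rolledUp.merge(row[0], Long.parseLong(row[1]), Long::sum);
    }
    System.out.println("ROLLUP: " + rolledUp); // {a=3, b=3}
  }
}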
@Override
public void runJob() {
  IntermediateResultsBlock mergedBlock = null;
  try {
    for (Operator operator : operatorGroup) {
      IntermediateResultsBlock blockToMerge = (IntermediateResultsBlock) operator.nextBlock();
      if (mergedBlock == null) {
        mergedBlock = blockToMerge;
      } else {
        try {
          CombineService.mergeTwoBlocks(_brokerRequest, mergedBlock, blockToMerge);
        } catch (Exception e) {
          LOGGER.error("Caught exception while merging two blocks (step 1).", e);
          mergedBlock.addToProcessingExceptions(QueryException.getException(QueryException.MERGE_RESPONSE_ERROR, e));
        }
      }
    }
  } catch (Exception e) {
    LOGGER.error("Caught exception while executing query.", e);
    mergedBlock = new IntermediateResultsBlock(e);
  }
  assert mergedBlock != null;
  blockingQueue.offer(mergedBlock);
}
/**
 * Assigns new segments to instances by referring to the partition assignment.
 *
 * @param newSegments segments to assign
 * @param partitionAssignment partition assignment for the table to which the segments belong
 * @return map of segment name to instances list
 */
public Map<String, List<String>> assign(Collection<String> newSegments, PartitionAssignment partitionAssignment)
    throws InvalidConfigException {
  Map<String, List<String>> segmentAssignment = new HashMap<>(newSegments.size());
  for (String segmentName : newSegments) {
    if (LLCSegmentName.isLowLevelConsumerSegmentName(segmentName)) {
      LLCSegmentName llcSegmentName = new LLCSegmentName(segmentName);
      int partitionId = llcSegmentName.getPartitionId();
      List<String> instancesListForPartition =
          partitionAssignment.getInstancesListForPartition(String.valueOf(partitionId));
      if (instancesListForPartition == null) {
        throw new InvalidConfigException(
            "No partition assignment found for partition " + partitionId + " of segment " + segmentName);
      }
      segmentAssignment.put(segmentName, instancesListForPartition);
    }
  }
  return segmentAssignment;
}
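// Hedged usage sketch for assign(...). "assigner" and "partitionAssignment"
// are hypothetical instances, and the segment names follow the LLC
// <table>__<partitionId>__<sequenceNumber>__<creationTime> naming convention;
// assumes java.util.* imports. Each segment gets the instance list of its
// partition.
List<String> newSegments = Arrays.asList("myTable__0__0__20180101T0000Z", "myTable__1__0__20180101T0000Z");
Map<String, List<String>> assignment = assigner.assign(newSegments, partitionAssignment);
// e.g. assignment: {myTable__0__0__20180101T0000Z=[Server_1, Server_2], ...}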
    serverInstance, e);
_brokerMetrics.addMeteredTableValue(tableNameWithType, BrokerMeter.DATA_TABLE_DESERIALIZATION_EXCEPTIONS, 1);
processingExceptions.add(QueryException.getException(QueryException.DATA_TABLE_DESERIALIZATION_ERROR, e));
/**
 * Generates the stream partition assignment for the given table, using tagged hosts and the number of partitions.
 */
public PartitionAssignment generateStreamPartitionAssignment(TableConfig tableConfig, int numPartitions)
    throws InvalidConfigException {
  // TODO: add an override which can read from the znode, instead of generating on the fly
  List<String> partitions = new ArrayList<>(numPartitions);
  for (int i = 0; i < numPartitions; i++) {
    partitions.add(String.valueOf(i));
  }

  String tableNameWithType = tableConfig.getTableName();
  int numReplicas = tableConfig.getValidationConfig().getReplicasPerPartitionNumber();

  List<String> consumingTaggedInstances = getConsumingTaggedInstances(tableConfig);
  if (consumingTaggedInstances.size() < numReplicas) {
    throw new InvalidConfigException(
        "Not enough consuming instances tagged. Must be at least equal to numReplicas: " + numReplicas);
  }

  /**
   * TODO: We will use only uniform assignment for now.
   * This will be refactored into an AssignmentStrategy interface with implementations such as UniformAssignment,
   * BalancedAssignment, etc. {@link StreamPartitionAssignmentGenerator} and the AssignmentStrategy interface will
   * together replace StreamPartitionAssignmentGenerator and StreamPartitionAssignmentStrategy.
   */
  return uniformAssignment(tableNameWithType, partitions, numReplicas, consumingTaggedInstances);
}
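// Self-contained, hedged sketch of one possible "uniform" round-robin
// assignment. The actual uniformAssignment(...) may distribute replicas
// differently; instance names and counts below are illustrative assumptions:
// 4 partitions, numReplicas = 2, over 3 consuming instances.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class UniformAssignmentSketch {
  public static void main(String[] args) {
    List<String> instances = Arrays.asList("Server_1", "Server_2", "Server_3");
    int numPartitions = 4;
    int numReplicas = 2;
    for (int partition = 0; partition < numPartitions; partition++) {
      List<String> replicas = new ArrayList<>();
      for (int replica = 0; replica < numReplicas; replica++) {
        // Walk the instance list round-robin so load spreads evenly
        replicas.add(instances.get((partition * numReplicas + replica) % instances.size()));
      }
      System.out.println("Partition " + partition + " -> " + replicas);
    }
  }
}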
@Test
public void testException() throws IOException {
  Exception exception = new UnsupportedOperationException("Caught exception.");
  ProcessingException processingException =
      QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, exception);
  String expected = processingException.getMessage();

  DataTable dataTable = new DataTableImplV2();
  dataTable.addException(processingException);
  DataTable newDataTable = DataTableFactory.getDataTable(dataTable.toBytes());
  Assert.assertNull(newDataTable.getDataSchema());
  Assert.assertEquals(newDataTable.getNumberOfRows(), 0);

  String actual = newDataTable.getMetadata()
      .get(DataTable.EXCEPTION_METADATA_KEY + QueryException.QUERY_EXECUTION_ERROR.getErrorCode());
  Assert.assertEquals(actual, expected);
}
throw new InvalidConfigException("Realtime table is not supported by replica group rebalancer");
public PinotResourceManagerResponse rebuildBrokerResourceFromHelixTags(String tableNameWithType) throws Exception {
  TableConfig tableConfig;
  try {
    tableConfig = ZKMetadataProvider.getTableConfig(_propertyStore, tableNameWithType);
  } catch (Exception e) {
    LOGGER.warn("Caught exception while getting table config for table {}", tableNameWithType, e);
    throw new InvalidTableConfigException(
        "Failed to fetch broker tag for table " + tableNameWithType + " due to exception: " + e.getMessage());
  }
  if (tableConfig == null) {
    LOGGER.warn("Table {} does not exist", tableNameWithType);
    throw new InvalidConfigException(
        "Invalid table configuration for table " + tableNameWithType + ". Table does not exist");
  }
  return rebuildBrokerResource(tableNameWithType,
      getAllInstancesForBrokerTenant(tableConfig.getTenantConfig().getBroker()));
}