public SimpleParallelizer(QueryContext context, AttemptObserver observer, Collection<NodeEndpoint> activeEndpoints) {
  this.queryContext = context;
  OptionManager optionManager = context.getOptions();
  long sliceTarget = context.getPlannerSettings().getSliceTarget();
  this.parallelizationThreshold = sliceTarget > 0 ? sliceTarget : 1;

  // Per-node width: average executor cores scaled by a load-dependent factor, floored at 1.
  final long configuredMaxWidthPerNode = context.getClusterResourceInformation().getAverageExecutorCores(optionManager);
  final double maxWidthFactor = context.getWorkStatsProvider().get().getMaxWidthFactor();
  this.maxWidthPerNode = (int) Math.max(1, configuredMaxWidthPerNode * maxWidthFactor);

  if (logger.isDebugEnabled() && maxWidthFactor < 1) {
    final float clusterLoad = context.getWorkStatsProvider().get().getClusterLoad();
    logger.debug("Cluster load {} exceeded cutoff, max_width_factor = {}. current max_width = {}",
        clusterLoad, maxWidthFactor, this.maxWidthPerNode);
  }

  // Fall back to the context's active endpoints when none are supplied.
  this.executionMap = new ExecutionNodeMap(Optional.ofNullable(activeEndpoints).orElse(context.getActiveEndpoints()));
  this.maxGlobalWidth = (int) optionManager.getOption(ExecConstants.MAX_WIDTH_GLOBAL);
  this.affinityFactor = optionManager.getOption(ExecConstants.AFFINITY_FACTOR);
  this.useNewAssignmentCreator = !optionManager.getOption(ExecConstants.OLD_ASSIGNMENT_CREATOR);
  this.assignmentCreatorBalanceFactor = optionManager.getOption(ExecConstants.ASSIGNMENT_CREATOR_BALANCE_FACTOR);
  this.observer = observer;
  this.fragmentCodec = FragmentCodec.valueOf(optionManager.getOption(ExecConstants.FRAGMENT_CODEC).toUpperCase());
}
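// For context: the width computation above reduces to a clamp of configured
// cores times a load-dependent factor, floored at 1. A minimal standalone
// sketch of that arithmetic in plain Java; the class and method names here
// are hypothetical illustrations, not Dremio APIs.
public final class WidthMathSketch {
  // Mirrors: maxWidthPerNode = max(1, configuredCores * maxWidthFactor)
  static int maxWidthPerNode(long configuredCores, double maxWidthFactor) {
    return (int) Math.max(1, configuredCores * maxWidthFactor);
  }

  // Mirrors: parallelizationThreshold = sliceTarget > 0 ? sliceTarget : 1
  static long parallelizationThreshold(long sliceTarget) {
    return sliceTarget > 0 ? sliceTarget : 1;
  }

  public static void main(String[] args) {
    System.out.println(maxWidthPerNode(8, 0.5));      // 4: heavy load halves the width
    System.out.println(maxWidthPerNode(8, 0.05));     // 1: never drops below one
    System.out.println(parallelizationThreshold(0));  // 1: non-positive slice target
  }
}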
ExpansionHelper(QueryContext context) {
  this.context = Preconditions.checkNotNull(context, "query context required");
  converter = new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      MaterializationDescriptorProvider.EMPTY,
      context.getFunctionRegistry(),
      context.getSession(),
      AbstractAttemptObserver.NOOP,
      context.getCatalog(),
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
}
protected QueryContext mockQueryContext(SabotContext dbContext) throws Exception {
  final UserSession userSession = UserSession.Builder.newBuilder()
      .withOptionManager(dbContext.getOptionManager())
      .setSupportComplexTypes(true)
      .build();
  final SessionOptionManager sessionOptions = (SessionOptionManager) userSession.getOptions();
  final QueryOptionManager queryOptions = new QueryOptionManager(sessionOptions);
  final ExecutionControls executionControls = new ExecutionControls(queryOptions, NodeEndpoint.getDefaultInstance());
  final OperatorTable table = new OperatorTable(FUNCTIONS());
  final LogicalPlanPersistence lp = dbContext.getLpPersistence();
  final CatalogService registry = dbContext.getCatalogService();

  final QueryContext context = Mockito.mock(QueryContext.class);
  // Stub getSession() exactly once; a second when(...) on the same call would
  // silently replace this stubbing and drop the session's option manager.
  when(context.getSession()).thenReturn(userSession);
  when(context.getLpPersistence()).thenReturn(lp);
  when(context.getCatalogService()).thenReturn(registry);
  when(context.getFunctionRegistry()).thenReturn(FUNCTIONS());
  when(context.getCurrentEndpoint()).thenReturn(NodeEndpoint.getDefaultInstance());
  when(context.getActiveEndpoints()).thenReturn(ImmutableList.of(NodeEndpoint.getDefaultInstance()));
  when(context.getPlannerSettings()).thenReturn(new PlannerSettings(dbContext.getConfig(), queryOptions, dbContext.getClusterResourceInformation()));
  when(context.getOptions()).thenReturn(queryOptions);
  when(context.getConfig()).thenReturn(DEFAULT_SABOT_CONFIG);
  when(context.getOperatorTable()).thenReturn(table);
  when(context.getAllocator()).thenReturn(allocator);
  when(context.getExecutionControls()).thenReturn(executionControls);
  when(context.getMaterializationProvider()).thenReturn(Mockito.mock(MaterializationDescriptorProvider.class));
  return context;
}
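// Note: stubbing the same call twice in Mockito silently replaces the earlier
// stubbing, which is why getSession() above is stubbed only once. A minimal
// self-contained demo of that behavior; the Config interface is a hypothetical
// stand-in, and mock/when/thenReturn are standard Mockito.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class StubbingOverrideDemo {
  interface Config { String name(); }

  public static void main(String[] args) {
    Config config = mock(Config.class);
    when(config.name()).thenReturn("first");
    when(config.name()).thenReturn("second"); // replaces the first stubbing entirely
    System.out.println(config.name());        // prints "second"
  }
}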
if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
  phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode,
      context.getPlannerSettings().getHashJoinSwapMarginFactor());
}

phyRelNode = SimpleLimitExchangeRemover.apply(config.getContext().getPlannerSettings(), phyRelNode);

// Remove exchanges that would produce fragments below the target slice size.
long targetSliceSize = config.getContext().getPlannerSettings().getSliceTarget();
phyRelNode = ExcessiveExchangeIdentifier.removeExcessiveEchanges(phyRelNode, targetSliceSize);

if (context.getPlannerSettings().isGlobalDictionariesEnabled()) {
  phyRelNode = GlobalDictionaryVisitor.useGlobalDictionaries(phyRelNode);
}
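// The block above is a chain of flag-guarded plan rewrites. A minimal sketch of
// that shape, assuming a hypothetical Plan type and pass lambdas; none of these
// names are Dremio classes.
import java.util.ArrayList;
import java.util.List;
import java.util.function.UnaryOperator;

public class RewritePipelineSketch {
  record Plan(List<String> applied) {}

  // Apply a rewrite pass only when its planner option is enabled.
  static Plan applyIf(boolean enabled, Plan plan, UnaryOperator<Plan> pass) {
    return enabled ? pass.apply(plan) : plan;
  }

  public static void main(String[] args) {
    Plan plan = new Plan(new ArrayList<>());
    plan = applyIf(true, plan, p -> { p.applied().add("swapHashJoin"); return p; });
    plan = applyIf(false, plan, p -> { p.applied().add("globalDictionaries"); return p; });
    System.out.println(plan.applied()); // [swapHashJoin]
  }
}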
final HepPlanner hepPlanner = new DremioHepPlanner(
    hepPgmBldr.build(), config.getContext().getPlannerSettings(), converter.getCostFactory(), phase);
hepPlanner.setExecutor(new ConstExecutor(
    converter.getFunctionImplementationRegistry(), converter.getFunctionContext(), converter.getSettings()));
if (join.getJoinType() == JoinType.CROSS && !context.getPlannerSettings().isCrossJoinEnabled()) {
  unsupportedOperatorCollector.setException(SqlUnsupportedException.ExceptionType.RELATIONAL,
      "Dremio doesn't currently support CROSS JOIN.");
}

if (checkDirExplorers(sqlSelect.getWhere()) && !context.getPlannerSettings().isConstantFoldingEnabled()) {
  unsupportedOperatorCollector.setException(SqlUnsupportedException.ExceptionType.FUNCTION,
      "Directory explorers " + dirExplorers + " functions cannot be used " +
private SqlConverter getNewConverter(QueryContext context, SqlQuery query, AttemptObserver observerForSubstitution) {
  Catalog catalog = context.getCatalog();
  final List<String> sqlContext = query.getContext();
  if (sqlContext != null) {
    NamespaceKey path = new NamespaceKey(sqlContext);
    try {
      catalog = catalog.resolveCatalog(path);
    } catch (Exception e) {
      throw UserException.validationError(e)
          .message("Unable to resolve schema path [%s]. Failure resolving [%s] portion of path.", sqlContext, path)
          .build(logger);
    }
  }
  return new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observerForSubstitution,
      catalog,
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
}
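// The resolve-or-fail pattern above, reduced to plain Java: resolve the default
// schema when a SQL context is supplied, otherwise keep the root catalog, and
// wrap resolution failures in a validation-style error. Catalog here is a
// hypothetical functional interface, not Dremio's.
import java.util.List;

public class SchemaResolutionSketch {
  interface Catalog { Catalog resolveCatalog(List<String> path); }

  static Catalog resolveDefaultSchema(Catalog catalog, List<String> sqlContext) {
    if (sqlContext == null) {
      return catalog; // no default schema requested; keep the root catalog
    }
    try {
      return catalog.resolveCatalog(sqlContext);
    } catch (Exception e) {
      throw new IllegalArgumentException(
          String.format("Unable to resolve schema path [%s].", sqlContext), e);
    }
  }

  public static void main(String[] args) {
    Catalog root = path -> { throw new RuntimeException("unknown schema: " + path); };
    try {
      resolveDefaultSchema(root, List.of("no", "such", "schema"));
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // Unable to resolve schema path [[no, such, schema]].
    }
  }
}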
final boolean leafLimitEnabled = config.getContext().getPlannerSettings().isLeafLimitsEnabled();
final AttemptObserver observer = new PassthroughQueryObserver(ExecTest.mockUserClientConnection(null));
final SqlConverter converter = new SqlConverter(
    queryContext.getPlannerSettings(),
    queryContext.getOperatorTable(),
    queryContext,
protected ElasticsearchGroupScan generate(String sql) throws Exception {
  AttemptObserver observer = new PassthroughQueryObserver(ExecTest.mockUserClientConnection(null));
  SqlConverter converter = new SqlConverter(
      context.getPlannerSettings(),
      context.getOperatorTable(),
      context,
      context.getMaterializationProvider(),
      context.getFunctionRegistry(),
      context.getSession(),
      observer,
      context.getCatalog(),
      context.getSubstitutionProviderFactory(),
      context.getConfig(),
      context.getScanResult());
  SqlNode node = converter.parse(sql);
  SqlHandlerConfig config = new SqlHandlerConfig(context, converter, observer, null);
  NormalHandler handler = new NormalHandler();
  PhysicalPlan plan = handler.getPlan(config, sql, node);
  List<PhysicalOperator> operators = plan.getSortedOperators();
  ElasticsearchGroupScan scan = find(operators);
  assertNotNull("Physical plan does not contain an elasticsearch scan for query: " + sql, scan);
  return scan;
}
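// The find(operators) step above scans the sorted operator list for the first
// operator of the expected type. A generic, self-contained version of that
// lookup; the operator classes here are hypothetical stand-ins.
import java.util.List;
import java.util.Optional;

public class OperatorFinderSketch {
  interface PhysicalOperator {}
  static class Scan implements PhysicalOperator {}
  static class Project implements PhysicalOperator {}

  static <T extends PhysicalOperator> Optional<T> find(List<PhysicalOperator> ops, Class<T> type) {
    return ops.stream().filter(type::isInstance).map(type::cast).findFirst();
  }

  public static void main(String[] args) {
    List<PhysicalOperator> ops = List.of(new Project(), new Scan());
    System.out.println(find(ops, Scan.class).isPresent()); // true
  }
}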
private CommandRunner<?> getSqlCommand(String sql, boolean isPrepare) {
  try {
    final SqlConverter parser = new SqlConverter(
        context.getPlannerSettings(),
        context.getOperatorTable(),
        context,