// Supplier implementation (fragment: the enclosing anonymous class / field
// declaration is outside this view, hence the trailing "};").
// Builds a fresh QueryContext backed by a system-user session.
@Override public QueryContext get() {
  // System session derived from the current option manager (internal, non-user queries).
  final UserSession session = systemSession(getOptionManager());
  // NOTE(review): a new AttemptId is minted per call — each get() yields a distinct query id.
  return new QueryContext(session, sabotContext.get(), new AttemptId().toQueryId());
} };
/**
 * Creates an ExpansionHelper bound to the given query context.
 *
 * <p>Eagerly constructs a {@link SqlConverter} that uses an empty materialization
 * provider and a no-op attempt observer, i.e. expansion through this helper never
 * consults reflections and never reports attempt events.
 *
 * @param queryContext the query context to expand against; must not be null
 */
ExpansionHelper(QueryContext queryContext) {
  Preconditions.checkNotNull(queryContext, "query context required");
  this.context = queryContext;
  this.converter =
      new SqlConverter(
          queryContext.getPlannerSettings(),
          queryContext.getOperatorTable(),
          queryContext,
          MaterializationDescriptorProvider.EMPTY,
          queryContext.getFunctionRegistry(),
          queryContext.getSession(),
          AbstractAttemptObserver.NOOP,
          queryContext.getCatalog(),
          queryContext.getSubstitutionProviderFactory(),
          queryContext.getConfig(),
          queryContext.getScanResult());
}
/**
 * Generates the execution work units for the given fragment tree.
 *
 * <p>Requires a non-null {@code queryContext}: the context is not always present
 * in other code paths (e.g. QueryContextInfo may change between construction and
 * this call), so the precondition is checked explicitly here.
 *
 * @param options     non-default options to ship with the fragments
 * @param reader      physical plan reader used to serialize fragments
 * @param rootNode    root of the fragment tree
 * @param planningSet parallelization planning state
 * @return the list of plan fragments to execute
 * @throws ExecutionSetupException if work-unit generation fails
 */
private List<PlanFragment> generateWorkUnit(
    OptionList options,
    PhysicalPlanReader reader,
    Fragment rootNode,
    PlanningSet planningSet) throws ExecutionSetupException {
  // Fail fast rather than NPE deep inside the overload below.
  final QueryContext ctx = Preconditions.checkNotNull(queryContext);
  return generateWorkUnit(
      options,
      ctx.getCurrentEndpoint(),
      ctx.getQueryId(),
      reader,
      rootNode,
      planningSet,
      ctx.getSession(),
      ctx.getQueryContextInfo(),
      ctx.getFunctionRegistry());
}
/**
 * Builds the prepared-statement response for this plan.
 *
 * @return the response carrying the plan root's output schema, the server state,
 *     the query id, and the session's catalog name
 */
@Override
public CreatePreparedStatementResp execute() {
  final String catalogName = context.getSession().getCatalogName();
  return PreparedStatementProvider.build(
      plan.getRoot().getSchema(context.getFunctionRegistry()),
      state,
      context.getQueryId(),
      catalogName);
}
// Fragment: this method is truncated in this view — the if/else-if chain that
// dispatches on the parsed SqlNode type lost several branch headers, so the raw
// text below is not compilable as-is. Code left byte-identical; comments only.
private CommandRunner<?> getSqlCommand(String sql, boolean isPrepare) {
  try{
    // Converter configured with the real materialization provider (unlike the
    // EMPTY provider used by the expansion helper elsewhere in this file).
    final SqlConverter parser = new SqlConverter( context.getPlannerSettings(), context.getOperatorTable(), context, context.getMaterializationProvider(), context.getFunctionRegistry(), context.getSession(), observer, context.getCatalog(), context.getSubstitutionProviderFactory(), context.getConfig(), context.getScanResult());
    // Test hook: controls injection can simulate a parse-time failure here.
    injector.injectChecked(context.getExecutionControls(), "sql-parsing", ForemanSetupException.class);
    final DremioCatalogReader reader = parser.getCatalogReader();
    final Catalog catalog = context.getCatalog();
    final SqlNode sqlNode = parser.parse(sql);
    final SqlHandlerConfig config = new SqlHandlerConfig(context, parser, observer, parser.getMaterializations());
    // NOTE(review): the conditions guarding the two returns below were lost in
    // this excerpt — each presumably sits under its own instanceof check.
    return direct.create(new SetOptionHandler(context.getSession()));
    return direct.create(new ShowTablesHandler(catalog));
  } else if (sqlNode instanceof SqlUseSchema) {
    return direct.create(new UseSchemaHandler(context.getSession(), catalog));
  } else if (sqlNode instanceof SqlCreateReflection) {
    return direct.create(new AccelCreateReflectionHandler(catalog, context.getAccelerationManager()));
  } else if (sqlNode instanceof SqlAddExternalReflection) {
    return direct.create(new AccelAddExternalReflectionHandler(catalog, context.getAccelerationManager()));
/**
 * Builds a Mockito-mocked QueryContext wired to the given SabotContext.
 *
 * <p>Bug fix: {@code getSession()} was previously stubbed twice — first with
 * {@code userSession}, then again with a fresh complex-types-enabled session.
 * In Mockito the later stub wins, so the first stub was dead code that
 * misleadingly suggested {@code userSession} was the session returned by the
 * mock. Only the effective stub is kept; {@code userSession} is still built
 * because its option manager backs the query-level options below.
 *
 * @param dbContext supplies the option manager, plan persistence, catalog
 *     service, config, and cluster resource information used by the stubs
 * @return a mocked QueryContext suitable for planning tests
 */
protected QueryContext mockQueryContext(SabotContext dbContext) throws Exception {
  final UserSession userSession =
      UserSession.Builder.newBuilder().withOptionManager(dbContext.getOptionManager()).build();
  final SessionOptionManager sessionOptions = (SessionOptionManager) userSession.getOptions();
  final QueryOptionManager queryOptions = new QueryOptionManager(sessionOptions);
  final ExecutionControls executionControls =
      new ExecutionControls(queryOptions, NodeEndpoint.getDefaultInstance());
  final OperatorTable table = new OperatorTable(FUNCTIONS());
  final LogicalPlanPersistence lp = dbContext.getLpPersistence();
  final CatalogService registry = dbContext.getCatalogService();

  final QueryContext context = Mockito.mock(QueryContext.class);
  // Single, effective session stub: complex types enabled for planning tests.
  when(context.getSession())
      .thenReturn(UserSession.Builder.newBuilder().setSupportComplexTypes(true).build());
  when(context.getLpPersistence()).thenReturn(lp);
  when(context.getCatalogService()).thenReturn(registry);
  when(context.getFunctionRegistry()).thenReturn(FUNCTIONS());
  when(context.getCurrentEndpoint()).thenReturn(NodeEndpoint.getDefaultInstance());
  when(context.getActiveEndpoints()).thenReturn(ImmutableList.of(NodeEndpoint.getDefaultInstance()));
  when(context.getPlannerSettings())
      .thenReturn(new PlannerSettings(dbContext.getConfig(), queryOptions, dbContext.getClusterResourceInformation()));
  when(context.getOptions()).thenReturn(queryOptions);
  when(context.getConfig()).thenReturn(DEFAULT_SABOT_CONFIG);
  when(context.getOperatorTable()).thenReturn(table);
  when(context.getAllocator()).thenReturn(allocator);
  when(context.getExecutionControls()).thenReturn(executionControls);
  when(context.getMaterializationProvider()).thenReturn(Mockito.mock(MaterializationDescriptorProvider.class));
  return context;
}
// Fragment: truncated excerpt — the resource-error 'if' below is never closed
// and the memory-limit ternary is missing its ':' alternative (the SMALL-queue
// option was presumably lost in extraction). Code left byte-identical.
Collection<NodeEndpoint> endpoints = queryContext.getActiveEndpoints();
if(endpoints.isEmpty()){
  // No executors registered: fail the query up-front rather than hang in scheduling.
  throw UserException.resourceError().message("No executors currently available.").build(logger);
final SimpleParallelizer parallelizer = new SimpleParallelizer(queryContext, observer);
// Only non-default options are shipped to executors.
final OptionList fragmentOptions = queryContext.getNonDefaultOptions();
CoordExecRPC.QueryContextInformation queryContextInformation = queryContext.getQueryContextInfo();
final OptionManager options = queryContext.getOptions();
final boolean memoryControlEnabled = options.getOption(BasicResourceConstants.ENABLE_QUEUE_MEMORY_LIMIT);
// NOTE(review): the ternary below lost its ':' branch in this excerpt.
final long memoryLimit = (queueType == QueueType.SMALL) ? options.getOption(BasicResourceConstants.LARGE_QUEUE_MEMORY_LIMIT);
if (memoryControlEnabled && memoryLimit > 0) {
  // Clamp the query's max allocation to the queue's configured memory limit.
  final long queryMaxAllocation = queryContext.getQueryContextInfo().getQueryMaxAllocation();
  queryContextInformation = CoordExecRPC.QueryContextInformation.newBuilder(queryContextInformation)
      .setQueryMaxAllocation(Math.min(memoryLimit, queryMaxAllocation)).build();
// Orphaned argument tail: head of the work-unit-generation call is outside this view.
queryContext.getCurrentEndpoint(), queryContext.getQueryId(), endpoints, reader, rootOperatorFragment, queryContext.getSession(), queryContextInformation, queryContext.getFunctionRegistry());
// Fragment: physical-plan finalization excerpt — several enclosing if-blocks
// are unclosed and one argument tail lost its call head in extraction.
// Code left byte-identical; comments only.
OptionManager queryOptions = context.getOptions();
// Optionally swap hash-join sides based on the configured margin factor.
if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
  phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode, context.getPlannerSettings() .getHashJoinSwapMarginFactor());
// NOTE(review): orphaned argument tail — its call head is outside this view.
context.getOperatorTable(), context.getFunctionRegistry()), null);
// Remove exchanges a simple limit makes pointless, then exchanges the slice
// target does not justify.
phyRelNode = SimpleLimitExchangeRemover.apply(config.getContext().getPlannerSettings(), phyRelNode);
long targetSliceSize = config.getContext().getPlannerSettings().getSliceTarget();
phyRelNode = ExcessiveExchangeIdentifier.removeExcessiveEchanges(phyRelNode, targetSliceSize);
if (!context.getSession().isSupportComplexTypes()) {
  logger.debug("Client does not support complex types, add ComplexToJson operator.");
  phyRelNode = ComplexToJsonPrelVisitor.addComplexToJsonPrel(phyRelNode);
if (context.getPlannerSettings().isGlobalDictionariesEnabled()) {
  phyRelNode = GlobalDictionaryVisitor.useGlobalDictionaries(phyRelNode);
/**
 * Creates an OperatorContextImpl for running a direct command.
 *
 * <p>Allocates an unbounded "direct-command" child allocator and a synthetic
 * fragment handle (major/minor fragment id 0) for this query.
 *
 * @param writer the writer the operator context will drive
 * @return a fully wired operator context
 */
private OperatorContextImpl createContext(Writer writer) {
  final BufferAllocator childAllocator =
      context.getAllocator().newChildAllocator("direct-command", 0, Long.MAX_VALUE);
  final OperatorStats operatorStats = new OperatorStats(new OpProfileDef(0,0,0), childAllocator);
  // Synthetic handle: direct commands always run as fragment 0:0 of the query.
  final FragmentHandle handle = FragmentHandle.newBuilder()
      .setQueryId(context.getQueryId())
      .setMajorFragmentId(0)
      .setMinorFragmentId(0)
      .build();
  return new OperatorContextImpl(
      context.getConfig(),
      handle,
      writer,
      childAllocator,
      childAllocator,
      null,
      operatorStats,
      null,
      null,
      context.getFunctionRegistry(),
      null,
      context.getOptions(),
      context.getNamespaceService(),
      null,
      NodeDebugContextProvider.NOOP,
      60000,
      null,
      ImmutableList.of());
} }
// Fragment: $MATERIALIZE (refresh reflection) handler excerpt — unclosed if,
// plus orphaned argument tails whose call heads are outside this view.
// Code left byte-identical; comments only.
final SqlRefreshReflection materialize = SqlNodeUtil.unwrap(sqlNode, SqlRefreshReflection.class);
// Only the internal system user may run $MATERIALIZE.
if(!SystemUser.SYSTEM_USERNAME.equals(config.getContext().getQueryUserName())) {
  throw SqlExceptionHelper.parseError("$MATERIALIZE not supported.", sql, materialize.getParserPosition()).build(logger);
ReflectionService service = config.getContext().getAccelerationManager().unwrap(ReflectionService.class);
// Orphaned argument tail (call head not visible in this excerpt):
namespace, new ExtendedToRelContext(config.getConverter()), config.getContext().getConfig(), reflectionSettings, materializationStore);
final UserBitShared.QueryId queryId = config.getContext().getQueryId();
final AttemptId attemptId = AttemptId.of(queryId);
// Orphaned tail of a writer/table-creation call (statement head truncated):
drel, config.getContext().getCatalog().createNewTable( new NamespaceKey(tablePath), getWriterOptions(0, goal, fields), ImmutableMap.of()
@SuppressWarnings("static-method") @Test public void uncheckedInjection() { // set exceptions via a string final String exceptionDesc = "<<injected from descPassthroughMethod()>>"; final String exceptionClassName = "java.lang.RuntimeException"; final String jsonString = "{\"injections\":[{" + "\"type\":\"exception\"," + "\"siteClass\":\"com.dremio.exec.testing.TestExceptionInjection$DummyClass\"," + "\"desc\":\"" + exceptionDesc + "\"," + "\"nSkip\":0," + "\"nFire\":1," + "\"exceptionClass\":\"" + exceptionClassName + "\"" + "}]}"; ControlsInjectionUtil.setControls(session, jsonString); final QueryContext context = new QueryContext(session, nodes[0].getContext(), QueryId.getDefaultInstance()); // test that the exception gets thrown final DummyClass dummyClass = new DummyClass(context); assertPassthroughThrows(dummyClass, exceptionClassName, exceptionDesc); try { context.close(); } catch (Exception e) { fail(); } }
// Fragment: $COMPACT REFRESH handler excerpt — unclosed if, and a WriterRel
// construction whose enclosing statement is truncated. Code left
// byte-identical; comments only.
final SqlCompactMaterialization compact = SqlNodeUtil.unwrap(sqlNode, SqlCompactMaterialization.class);
// Only the internal system user may run $COMPACT REFRESH.
if(!SystemUser.SYSTEM_USERNAME.equals(config.getContext().getQueryUserName())) {
  throw SqlExceptionHelper.parseError("$COMPACT REFRESH not supported.", sql, compact.getParserPosition()) .build(logger);
ReflectionService service = config.getContext().getAccelerationManager().unwrap(ReflectionService.class);
// Ring count drives the writer's partition distribution.
final long ringCount = config.getContext().getOptions().getOption(PlannerSettings.RING_COUNT);
final Rel writerDrel = new WriterRel(drel.getCluster(), drel.getCluster().traitSet().plus(Rel.LOGICAL), drel, config.getContext().getCatalog().createNewTable( new NamespaceKey(ReflectionUtils.getMaterializationPath(newMaterialization)), getWriterOptions((int) ringCount, goal, fields), ImmutableMap.of()),
@Before public void setUp() { MockitoAnnotations.initMocks(this); // Boilerplate AccelerationManager accelerationManager = mock(AccelerationManager.class); AccelerationDetailsPopulator populator = mock(AccelerationDetailsPopulator.class); when(populator.computeAcceleration()).thenReturn(ByteString.EMPTY_BYTE_ARRAY); when(accelerationManager.newPopulator()).thenReturn(populator); when(context.getAccelerationManager()).thenReturn(accelerationManager); when(context.getQueryUserName()).thenReturn("myuser"); when(context.getSession()).thenReturn(UserSession.Builder.newBuilder().build()); when(context.getNonDefaultOptions()).thenReturn(new OptionList()); when(catalog.getMetadataStatsCollector()).thenReturn(new MetadataStatsCollector()); }
/**
 * Builds a parallelizer bound to the given query context.
 *
 * <p>Per-node width is the configured average executor core count scaled by the
 * current cluster max-width factor, floored at 1; the remaining knobs are read
 * from the option manager.
 *
 * @param context         query context supplying options and cluster information
 * @param observer        receives planning/parallelization events
 * @param activeEndpoints endpoints to schedule on; when null, falls back to the
 *                        context's active endpoints
 */
public SimpleParallelizer(QueryContext context, AttemptObserver observer, Collection<NodeEndpoint> activeEndpoints) {
  this.queryContext = context;
  final OptionManager opts = context.getOptions();

  final long sliceTarget = context.getPlannerSettings().getSliceTarget();
  this.parallelizationThreshold = sliceTarget > 0 ? sliceTarget : 1;

  final long coresPerNode = context.getClusterResourceInformation().getAverageExecutorCores(opts);
  final double widthFactor = context.getWorkStatsProvider().get().getMaxWidthFactor();
  this.maxWidthPerNode = (int) Math.max(1, coresPerNode * widthFactor);
  if (logger.isDebugEnabled() && widthFactor < 1) {
    // Width factor below 1 means the cluster is under load; log the details.
    final float clusterLoad = context.getWorkStatsProvider().get().getClusterLoad();
    logger.debug("Cluster load {} exceeded cutoff, max_width_factor = {}. current max_width = {}",
        clusterLoad, widthFactor, this.maxWidthPerNode);
  }

  this.executionMap = new ExecutionNodeMap(Optional.ofNullable(activeEndpoints).orElse(context.getActiveEndpoints()));
  this.maxGlobalWidth = (int) opts.getOption(ExecConstants.MAX_WIDTH_GLOBAL);
  this.affinityFactor = opts.getOption(ExecConstants.AFFINITY_FACTOR);
  this.useNewAssignmentCreator = !opts.getOption(ExecConstants.OLD_ASSIGNMENT_CREATOR);
  this.assignmentCreatorBalanceFactor = opts.getOption(ExecConstants.ASSIGNMENT_CREATOR_BALANCE_FACTOR);
  this.observer = observer;
  this.fragmentCodec = FragmentCodec.valueOf(opts.getOption(ExecConstants.FRAGMENT_CODEC).toUpperCase());
}
// Fragment: result-store table resolution — the StrTokenizer statement is
// truncated (its terminal call, e.g. getTokenList(), is missing). Code left
// byte-identical; comments only.
// Honor the session's initial quoting when splitting the dotted table path.
final Quoting quoting = Optional.ofNullable(context.getSession().getInitialQuoting()).orElse(ParserConfig.QUOTING);
final List<String> storeTable = new StrTokenizer(storeTablePath, '.', quoting.string.charAt(0))
// Append the query id as the final path segment so each query gets its own table.
storeTable.add(QueryIdHelper.getQueryId(context.getQueryId()));
// Results tables are created under the system user's catalog.
final CreateTableEntry createTableEntry = context.getCatalog()
    .resolveCatalog(SystemUser.SYSTEM_USERNAME)
    .createNewTable(new NamespaceKey(storeTable), WriterOptions.DEFAULT, storageOptions);
// Fragment: request-to-command dispatch — most 'case' labels of the switch were
// lost in extraction (only GET_CATALOGS survives) and the PreparedPlan section
// belongs to a later, unseen case. Code left byte-identical; comments only.
public CommandRunner<?> toCommand() throws ForemanException {
  // Test hook: controls injection can abort at the start of toCommand().
  injector.injectChecked(context.getExecutionControls(), "run-try-beginning", ForemanException.class);
  switch(request.getType()){
  case GET_CATALOGS:
    return new MetadataProvider.CatalogsProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetCatalogsReq.class));
    // NOTE(review): the returns below presumably sit under GET_COLUMNS,
    // GET_SCHEMAS, GET_TABLES, and GET_SERVER_META case labels respectively.
    return new MetadataProvider.ColumnsProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetColumnsReq.class));
    return new MetadataProvider.SchemasProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetSchemasReq.class));
    return new MetadataProvider.TablesProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetTablesReq.class));
    return new ServerMetaProvider.ServerMetaCommandRunner(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetServerMetaReq.class));
  PreparedPlan plan = plans.getIfPresent(handle);
  if(plan != null){
    // Prepared handles are single-use unless REUSE_PREPARE_HANDLES is enabled.
    if (!context.getOptions().getOption(REUSE_PREPARE_HANDLES)) {
      plans.invalidate(handle);
    // A prepared plan may only be executed by the user who prepared it.
    Preconditions.checkArgument( plan.getUsername() .equals(context.getSession() .getCredentials() .getUserName()));
// Fragment: vectorized hash-agg eligibility check — blocks are unclosed and the
// two 'expr' materializations below likely come from separate loops lost in
// extraction ('ne' is not declared here). Code left byte-identical.
private boolean initialCanVectorize(PhysicalPlanCreator creator, PhysicalOperator child){
  // Vectorized hash aggregation must be enabled at all.
  if(!creator.getContext().getOptions().getOption(ExecConstants.ENABLE_VECTORIZED_HASHAGG)){
    return false;
  final BatchSchema childSchema = child.getSchema(creator.getContext().getFunctionRegistry());
  // Materialize expressions against the child schema, failing on any errors.
  LogicalExpression expr = ExpressionTreeMaterializer.materializeAndCheckErrors(ne.getExpr(), childSchema, creator.getContext().getFunctionRegistry());
  final LogicalExpression expr = ExpressionTreeMaterializer.materializeAndCheckErrors(ne.getExpr(), childSchema, creator.getContext().getFunctionRegistry());
// Fragment: attempt-setup excerpt — the trailing lines are an orphaned argument
// tail whose call head is outside this view. Code left byte-identical.
this.observers = AttemptObservers.of(observer);
final OptionManager optionManager = this.queryContext.getOptions();
// Apply any request-supplied option overrides onto the query's option manager.
if(options != null){
  options.applyOptions(optionManager);
// Orphaned argument tail (call head not visible in this excerpt):
optionManager.getOption(PlannerSettings.INCLUDE_DATASET_PROFILE), this.queryContext.getCatalog());
/**
 * Creates a QueryManager for a single query attempt.
 *
 * <p>Registers a resource-allocation observer, a plan-capture observer, and a
 * time marker on the supplied observer chain.
 *
 * @param queryId                id of the query being managed
 * @param context                query context (options, function registry, …)
 * @param tunnelCreator          creates coordinator-to-executor tunnels
 * @param completionListener     notified when the query finishes
 * @param prepareId              out-parameter carrying a prepared-statement id
 * @param observers              observer chain to attach to
 * @param verboseProfiles        whether captured plans include verbose profiles
 * @param includeDatasetProfiles whether dataset profiles are captured
 * @param catalog                catalog used for this query
 */
public QueryManager(
    final QueryId queryId,
    final QueryContext context,
    final CoordToExecTunnelCreator tunnelCreator,
    final CompletionListener completionListener,
    final Pointer<QueryId> prepareId,
    final AttemptObservers observers,
    final boolean verboseProfiles,
    final boolean includeDatasetProfiles,
    final Catalog catalog) {
  this.queryId = queryId;
  this.tunnelCreator = tunnelCreator;
  this.completionListener = completionListener;
  this.context = context;
  this.prepareId = prepareId;
  this.catalog = catalog;
  // Snapshot of non-default options for profile reporting.
  this.nonDefaultOptions = context.getNonDefaultOptions();

  resourceAllocationResultObserver = new ResourceAllocationResultObserver();
  observers.add(resourceAllocationResultObserver);

  capturer = new PlanCaptureAttemptObserver(
      verboseProfiles,
      includeDatasetProfiles,
      context.getFunctionRegistry(),
      context.getAccelerationManager().newPopulator());
  observers.add(capturer);
  observers.add(new TimeMarker());
}
// Fragment: query-results storage setup — the Optional chain resolving the
// StoreQueryResultsPolicy is truncated and 'storeTable' is declared outside
// this view. Code left byte-identical; comments only.
final OptionManager options = context.getOptions();
// Policy controlling whether/where query results are stored.
final StoreQueryResultsPolicy storeQueryResultsPolicy = Optional
    .ofNullable(options.getOption(STORE_QUERY_RESULTS.getOptionName()))
// Append the query id as the final path segment for a per-query results table.
storeTable.add(QueryIdHelper.getQueryId(context.getQueryId()));
// Results tables are created under the system user's catalog.
final CreateTableEntry createTableEntry = context.getCatalog()
    .resolveCatalog(SystemUser.SYSTEM_USERNAME)
    .createNewTable(new NamespaceKey(storeTable), writerOptions, storageOptions);