// Header line for the plan-fragment debug dump: "PlanFragments for query <queryId>".
final StringBuilder sb = new StringBuilder(); sb.append("PlanFragments for query "); sb.append(queryContext.getQueryId()); sb.append('\n');
/**
 * Generates the plan fragments (work units) for the given physical plan, using the
 * attempt-level {@link #queryContext} for endpoint, session, and function-registry state.
 *
 * <p>NOTE(review): the original comment warned that QueryContextInfo may change between
 * construction and this call — callers should not assume it is stable.
 *
 * @param options     fragment-level option list to embed in each fragment
 * @param reader      reader used to serialize the physical plan
 * @param rootNode    root of the fragment tree
 * @param planningSet parallelization info for the fragments
 * @return the generated plan fragments
 * @throws ExecutionSetupException if fragment generation fails
 */
private List<PlanFragment> generateWorkUnit(
    OptionList options,
    PhysicalPlanReader reader,
    Fragment rootNode,
    PlanningSet planningSet) throws ExecutionSetupException {
  // queryContext is required here even though it may be absent in other code paths.
  Preconditions.checkNotNull(queryContext);
  return generateWorkUnit(
      options,
      queryContext.getCurrentEndpoint(),
      queryContext.getQueryId(),
      reader,
      rootNode,
      planningSet,
      queryContext.getSession(),
      queryContext.getQueryContextInfo(),
      queryContext.getFunctionRegistry());
}
// Record this query's id (string form via QueryIdHelper) in storeTable.
storeTable.add(QueryIdHelper.getQueryId(context.getQueryId()));
@Override public double plan() throws Exception { try{ final RecordingObserver recording = new RecordingObserver(); final AttemptObservers observers = AttemptObservers.of(observer, recording); observers.planStart(sql); plan = handler.getPlan(config.cloneWithNewObserver(observers), sql, sqlNode); PreparedPlan prepared = new PreparedPlan(context.getQueryId(), context.getQueryUserName(), sql, plan, recording); final Long handle = PREPARE_ID.getAndIncrement(); state = ServerPreparedStatementState.newBuilder() .setHandle(handle) .setSqlQuery(sql) .setPrepareId(context.getQueryId()) .build(); planCache.put(handle, prepared); // record a partial plan so that we can grab metadata and use it (for example during view creation of via sql). observers.planCompleted(new ExecutionPlan(plan, ImmutableList.of(), ImmutableList.of())); return 1; }catch(Exception ex){ throw SqlExceptionHelper.coerceException(logger, sql, ex, true); } }
/**
 * Builds the prepared-statement response for a previously planned statement,
 * using the schema of the plan's root operator.
 */
@Override
public CreatePreparedStatementResp execute() {
  return PreparedStatementProvider.build(
      plan.getRoot().getSchema(context.getFunctionRegistry()),
      state,
      context.getQueryId(),
      context.getSession().getCatalogName());
}
/**
 * Builds a prepared-statement response with a sentinel handle of -1
 * (no cached server-side plan is associated with this statement).
 */
@Override
public CreatePreparedStatementResp execute() throws Exception {
  final ServerPreparedStatementState sentinelState = ServerPreparedStatementState.newBuilder()
      .setHandle(-1)
      .setSqlQuery(sql)
      .build();
  return PreparedStatementProvider.build(
      schema,
      sentinelState,
      context.getQueryId(),
      context.getSession().getCatalogName());
}
// Dispatch the metadata request to the matching provider.
// FIX: only GET_CATALOGS carried a case label in the original; the four returns that
// followed were unreachable. Labels are restored to match each request's unwrap type.
switch (request.getType()) {
case GET_CATALOGS:
  return new MetadataProvider.CatalogsProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetCatalogsReq.class));
case GET_COLUMNS:
  return new MetadataProvider.ColumnsProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetColumnsReq.class));
case GET_SCHEMAS:
  return new MetadataProvider.SchemasProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetSchemasReq.class));
case GET_TABLES:
  return new MetadataProvider.TablesProvider(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetTablesReq.class));
case GET_SERVER_META:
  return new ServerMetaProvider.ServerMetaCommandRunner(context.getQueryId(), context.getSession(), dbContext, request.unwrap(GetServerMetaReq.class));
// Record this query's id (string form via QueryIdHelper) in storeTable.
storeTable.add(QueryIdHelper.getQueryId(context.getQueryId()));
private void depleteSend(SingleInputOperator op, VectorAccessible output, CollectingOutcomeListener listener) throws Exception{ while(op.getState() == State.CAN_PRODUCE){ int count = op.outputData(); final WritableBatch w = WritableBatch.get(output); QueryData header = QueryData.newBuilder() // .setQueryId(context.getQueryId()) // .setRowCount(count) // .setDef(w.getDef()).build(); QueryWritableBatch batch = new QueryWritableBatch(header, w.getBuffers()); listener.increment(); observer.execDataArrived(listener, batch); } }
/**
 * Creates an operator context for a direct command backed by a dedicated child
 * allocator (no reservation, unbounded limit).
 *
 * @param writer writer the operator context will use
 * @return the newly created operator context
 */
private OperatorContextImpl createContext(Writer writer) {
  BufferAllocator allocator = context.getAllocator().newChildAllocator("direct-command", 0, Long.MAX_VALUE);
  final OperatorStats stats = new OperatorStats(new OpProfileDef(0, 0, 0), allocator);
  // Synthetic handle: the direct command runs as fragment 0:0 of this query.
  final FragmentHandle handle = FragmentHandle.newBuilder()
      .setQueryId(context.getQueryId())
      .setMajorFragmentId(0)
      .setMinorFragmentId(0)
      .build();
  return new OperatorContextImpl(
      context.getConfig(),
      handle,
      writer,
      allocator,
      allocator,
      null,
      stats,
      null,
      null,
      context.getFunctionRegistry(),
      null,
      context.getOptions(),
      context.getNamespaceService(),
      null,
      NodeDebugContextProvider.NOOP,
      60000,
      null,
      ImmutableList.of());
}
}
// Forward the fragment options plus this node's endpoint and the query id from the query context.
fragmentOptions, queryContext.getCurrentEndpoint(), queryContext.getQueryId(), endpoints, reader,
// Derive the attempt id from the query id carried by the config's context.
final UserBitShared.QueryId queryId = config.getContext().getQueryId(); final AttemptId attemptId = AttemptId.of(queryId);
@Override public Object execute() throws Exception { observer.execStarted(null); final BatchSchema schema = PojoRecordReader.getSchema(handler.getResultType()); final CollectingOutcomeListener listener = new CollectingOutcomeListener(); try(BufferAllocator allocator = context.getAllocator().newChildAllocator("direct-command", 0, Long.MAX_VALUE); VectorContainer vc = VectorContainer.create(allocator, schema); BufferManager manager = new BufferManagerImpl(allocator); final PojoRecordReader<T> reader = new PojoRecordReader<>(handler.getResultType(), result.iterator()); ) { reader.setup(new VectorContainerMutator(vc, manager)); int count = 0; while( (count = reader.next()) != 0){ vc.setRecordCount(count); final WritableBatch w = WritableBatch.get(vc); QueryData header = QueryData.newBuilder() // .setQueryId(context.getQueryId()) // .setRowCount(count) // .setDef(w.getDef()).build(); QueryWritableBatch batch = new QueryWritableBatch(header, w.getBuffers()); listener.increment(); observer.execDataArrived(listener, batch); } listener.waitForFinish(); } return null; }