/**
 * Private constructor: compiles join-related state for the given SELECT.
 * Star-join is the default strategy and is disabled by the NO_STAR_JOIN hint;
 * sort-merge join is opt-in via the USE_SORT_MERGE_JOIN hint.
 */
private JoinCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) {
    this.statement = statement;
    this.select = select;
    this.origResolver = resolver;
    // Join-strategy hints are read once from the statement's hint node.
    this.useStarJoin = !select.getHint().hasHint(Hint.NO_STAR_JOIN);
    this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN);
    // Lookup tables populated during compilation.
    this.columnRefs = new HashMap<>();
    this.columnNodes = new HashMap<>();
}
/**
 * Creates iterators that execute the plan's scans serially rather than in parallel.
 *
 * <p>Serial execution is only permitted when it is bounded (an offset or a
 * per-scan limit is present) or when the user explicitly requested it via the
 * SERIAL hint; otherwise an {@link IllegalArgumentException} is thrown.
 *
 * @throws SQLException propagated from the superclass constructor
 */
public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset,
        ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan,
        Map<ImmutableBytesPtr, ServerCache> caches, QueryPlan dataPlan) throws SQLException {
    super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan);
    this.offset = offset;
    // Must have an offset, a limit, or the SERIAL hint; include a message so a
    // violation is diagnosable instead of a bare IllegalArgumentException.
    Preconditions.checkArgument(
            offset != null || perScanLimit != null
                    || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL),
            "Serial iteration requires an offset, a per-scan limit, or the SERIAL hint");
    this.iteratorFactory = iteratorFactory;
}
/**
 * Decides whether the query should run serially. Returns {@code true} only when
 * the data size is within the serial threshold AND the scan is structurally
 * capable of serial execution; a SERIAL hint on an incapable query is ignored
 * with a warning.
 *
 * @throws SQLException propagated from {@link ScanUtil#canQueryBeExecutedSerially}
 */
private static boolean isSerial(StatementContext context, FilterableStatement statement,
        TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException {
    // Too much data: never serial, regardless of hints.
    if (!isDataWithinThreshold) {
        return false;
    }
    PTable table = tableRef.getTable();
    boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL);
    if (ScanUtil.canQueryBeExecutedSerially(table, orderBy, context)) {
        return true;
    }
    // The hint asked for serial execution but the scan cannot honor it.
    if (hasSerialHint) {
        logger.warn("This query cannot be executed serially. Ignoring the hint");
    }
    return false;
}
public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map<TableRef, QueryPlan> dataPlans) throws SQLException { this.statement = statement; this.select = select; this.resolver = resolver; this.scan = new Scan(); this.targetColumns = targetColumns; this.parallelIteratorFactory = parallelIteratorFactory; this.sequenceManager = sequenceManager; this.projectTuples = projectTuples; this.noChildParentJoinOptimization = select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION) || select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); this.usePersistentCache = select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); ConnectionQueryServices services = statement.getConnection().getQueryServices(); this.costBased = services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED); scan.setLoadColumnFamiliesOnDemand(true); if (select.getHint().hasHint(Hint.NO_CACHE)) { scan.setCacheBlocks(false); } scan.setCaching(statement.getFetchSize()); this.originalScan = ScanUtil.newScan(scan); this.optimizeSubquery = optimizeSubquery; this.dataPlans = dataPlans == null ? Collections.<TableRef, QueryPlan>emptyMap() : dataPlans; }
Iterator<byte[]> minMaxIterator = Collections.emptyIterator(); boolean isLocalIndex = ScanUtil.isLocalIndex(context.getScan()); boolean forceSkipScan = this.hint.hasHint(Hint.SKIP_SCAN); int nRanges = forceSkipScan ? scanRanges.getRanges().size() : scanRanges.getBoundSlotCount(); for (int i = 0, minPos = 0; minPos < nRanges || minMaxIterator.hasNext(); i++) {
final boolean useDataOverIndexHint = select.getHint().hasHint(Hint.USE_DATA_OVER_INDEX_TABLE); final int comparisonOfDataVersusIndexTable = useDataOverIndexHint ? -1 : 1; Collections.sort(bestCandidates, new Comparator<QueryPlan>() {
/**
 * Private constructor for an aggregate query plan. The SERIAL hint is honored
 * only when the scan is actually capable of serial execution; otherwise a
 * warning is logged and the hint is ignored.
 *
 * @throws SQLException propagated from serial-capability analysis
 */
private AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table,
        RowProjector projector, Integer limit, Integer offset, OrderBy orderBy,
        ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having,
        Expression dynamicFilter, QueryPlan dataPlan) throws SQLException {
    super(context, statement, table, projector, context.getBindManager().getParameterMetaData(),
            limit, offset, orderBy, groupBy, parallelIteratorFactory, dynamicFilter, dataPlan);
    this.having = having;
    this.aggregators = context.getAggregationManager().getAggregators();
    boolean serialHinted = statement.getHint().hasHint(HintNode.Hint.SERIAL);
    boolean serialCapable =
            ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context);
    if (serialHinted && !serialCapable) {
        logger.warn("This query cannot be executed serially. Ignoring the hint");
    }
    this.isSerial = serialHinted && serialCapable;
}
if (indexes.isEmpty() || dataPlan.isDegenerate() || dataPlan.getTableRef().hasDynamicCols() || select.getHint().hasHint(Hint.NO_INDEX)) { return Collections.<QueryPlan> singletonList(dataPlan);
buf.append("TIMELINE-CONSISTENCY "); if (hint.hasHint(Hint.SMALL)) { buf.append(Hint.SMALL).append(" ");
if (statement.getHint().hasHint(Hint.RANGE_SCAN) || statement.getHaving() != null) { return GroupBy.UNGROUPED_GROUP_BY;
public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { super(context, statement, table, projector, limit, offset, where, orderBy, delegate); this.groupBy = groupBy; this.having = having; this.clientAggregators = context.getAggregationManager().getAggregators(); // We must deserialize rather than clone based off of client aggregators because // upon deserialization we create the server-side aggregators instead of the client-side // aggregators. We use the Configuration directly here to avoid the expense of creating // another one. this.serverAggregators = ServerAggregators.deserialize(context.getScan() .getAttribute(BaseScannerRegionObserver.AGGREGATORS), context.getConnection().getQueryServices().getConfiguration(), null); // Extract hash aggregate hint, if any. HintNode hints = statement.getHint(); useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); }
if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
&& context.getCurrentTable().getTable().getType() != PTableType.PROJECTED && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY && !statement.getHint().hasHint(Hint.FORWARD_SCAN)) { return OrderBy.REV_ROW_KEY_ORDER_BY;
if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset, ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException { super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan); this.offset = offset; // must be a offset or a limit specified or a SERIAL hint Preconditions.checkArgument( offset != null || perScanLimit != null || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL)); this.iteratorFactory = iteratorFactory; }
!plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && plan.getGroupBy().isOrderPreserving() &&
QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD); if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) { scan.setSmall(true);
new JoinType[]{type == JoinType.Right ? JoinType.Left : type}, new boolean[]{true}, new PTable[]{lhsTable}, new int[]{fieldPosition}, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset)); boolean usePersistentCache = joinTable.getStatement().getHint().hasHint(Hint.USE_PERSISTENT_CACHE); Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null); getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);