/**
 * Creates iterators that execute the plan's scans serially rather than in parallel.
 * Serial execution is only legal when the scan is bounded: an offset or a per-scan
 * limit must be present, or the statement must explicitly carry a SERIAL hint.
 *
 * @throws IllegalArgumentException if none of offset/limit/SERIAL-hint applies
 */
public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset,
        ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan,
        Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException {
    super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan);
    this.offset = offset;
    // must be an offset or a limit specified or a SERIAL hint.
    // Previously checkArgument had no message, so a violation surfaced as a bare
    // IllegalArgumentException; include a message to make the failure diagnosable.
    Preconditions.checkArgument(
            offset != null || perScanLimit != null
                    || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL),
            "SerialIterators requires an offset, a per-scan limit, or a SERIAL hint");
    this.iteratorFactory = iteratorFactory;
}
/**
 * Decides whether the query may be executed serially. Requires both that the data set
 * is within the serial threshold and that {@link ScanUtil#canQueryBeExecutedSerially}
 * approves the table/order-by/context combination. A SERIAL hint on an ineligible
 * query is dropped with a warning.
 */
private static boolean isSerial(StatementContext context, FilterableStatement statement,
        TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException {
    // Guard: serial execution is only considered for sufficiently small data sets.
    if (!isDataWithinThreshold) {
        return false;
    }
    PTable table = tableRef.getTable();
    boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL);
    boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context);
    if (canBeExecutedSerially) {
        return true;
    }
    // Ineligible: warn only if the user explicitly asked for serial execution.
    if (hasSerialHint) {
        logger.warn("This query cannot be executed serially. Ignoring the hint");
    }
    return false;
}
/**
 * Builds an aggregate query plan. Determines up front whether the plan will run
 * serially: a SERIAL hint is honored only when the scan can legally execute
 * serially; otherwise the hint is ignored with a warning.
 */
private AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table,
        RowProjector projector, Integer limit, Integer offset, OrderBy orderBy,
        ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having,
        Expression dynamicFilter, QueryPlan dataPlan) throws SQLException {
    super(context, statement, table, projector, context.getBindManager().getParameterMetaData(),
            limit, offset, orderBy, groupBy, parallelIteratorFactory, dynamicFilter, dataPlan);
    this.having = having;
    this.aggregators = context.getAggregationManager().getAggregators();
    boolean serialHinted = statement.getHint().hasHint(HintNode.Hint.SERIAL);
    boolean serialAllowed = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context);
    if (serialHinted && !serialAllowed) {
        logger.warn("This query cannot be executed serially. Ignoring the hint");
    }
    this.isSerial = serialHinted && serialAllowed;
}
// Constructor fragment (the body continues past this view — no closing brace visible).
// Seeds the base iterator state from the plan (context, table ref, group-by, order-by,
// statement hint) with the plan's limit and offset combined via QueryUtil.getOffsetLimit,
// then stashes the plan and scan for later use by subclasses.
public BaseResultIterators(QueryPlan plan, Integer perScanLimit, Integer offset, ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException { super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint(), QueryUtil.getOffsetLimit(plan.getLimit(), plan.getOffset()), offset); this.plan = plan; this.scan = scan;
public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { super(context, statement, table, projector, limit, offset, where, orderBy, delegate); this.groupBy = groupBy; this.having = having; this.clientAggregators = context.getAggregationManager().getAggregators(); // We must deserialize rather than clone based off of client aggregators because // upon deserialization we create the server-side aggregators instead of the client-side // aggregators. We use the Configuration directly here to avoid the expense of creating // another one. this.serverAggregators = ServerAggregators.deserialize(context.getScan() .getAttribute(BaseScannerRegionObserver.AGGREGATORS), context.getConnection().getQueryServices().getConfiguration(), null); // Extract hash aggregate hint, if any. HintNode hints = statement.getHint(); useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); }
/**
 * Constructs iterators that run the plan's scans one at a time (serially).
 * Serial iteration is only valid for a bounded scan, so the caller must supply
 * an offset, a per-scan limit, or a statement-level SERIAL hint.
 */
public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset,
        ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan,
        Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException {
    super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan);
    this.offset = offset;
    // An offset, a limit, or a SERIAL hint must be present for serial execution.
    Preconditions.checkArgument(
            offset != null
                    || perScanLimit != null
                    || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL));
    this.iteratorFactory = iteratorFactory;
}
/**
 * Serial-execution iterator constructor. Delegates shared setup to the base class,
 * then verifies that serial iteration is actually permitted here: the scan must be
 * bounded by an offset or a per-scan limit, or explicitly hinted SERIAL.
 */
public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset,
        ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan,
        Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException {
    super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan);
    this.offset = offset;
    // Precondition: offset/limit/SERIAL-hint — at least one must apply.
    Preconditions.checkArgument(perScanLimit != null || offset != null
            || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL));
    this.iteratorFactory = iteratorFactory;
}
// Expression fragment (part of a larger boolean condition — the start and end are outside
// this view): requires that no RANGE_SCAN hint is present, that `cols` is less than the
// table's row-key field count, and that the GROUP BY is order-preserving.
// NOTE(review): `cols` is defined outside this view — presumably the number of leading
// row-key columns bound by the scan; confirm against the enclosing method.
!plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && plan.getGroupBy().isOrderPreserving() &&
// Statement fragment (starts and ends mid-construct): marks the HBase scan as a "small"
// scan when the SMALL hint is present, or when the scan is a point lookup whose point
// count is below the configured small-scan threshold (default from QueryServicesOptions).
QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD); if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) { scan.setSmall(true);
// Returns true only when the query may run serially: the scanned data must be within the
// serial-execution threshold AND ScanUtil.canQueryBeExecutedSerially must approve the
// table/order-by/context combination. When a SERIAL hint is present but the query is
// ineligible, the hint is ignored and a warning is logged.
private static boolean isSerial(StatementContext context, FilterableStatement statement, TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException { if (isDataWithinThreshold) { PTable table = tableRef.getTable(); boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); if (!canBeExecutedSerially) { if (hasSerialHint) { logger.warn("This query cannot be executed serially. Ignoring the hint"); } return false; } return true; } return false; }
/**
 * Determines whether serial execution is permissible for this query.
 * Both conditions must hold: the data set fits the serial threshold, and
 * ScanUtil judges the table/order-by/context serially executable. If the
 * user hinted SERIAL but the query is ineligible, the hint is ignored and
 * a warning is logged.
 */
private static boolean isSerial(StatementContext context, FilterableStatement statement,
        TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException {
    if (isDataWithinThreshold) {
        PTable table = tableRef.getTable();
        boolean serialHinted = statement.getHint().hasHint(HintNode.Hint.SERIAL);
        if (ScanUtil.canQueryBeExecutedSerially(table, orderBy, context)) {
            return true;
        }
        if (serialHinted) {
            logger.warn("This query cannot be executed serially. Ignoring the hint");
        }
    }
    return false;
}
// Aggregate-plan constructor. Delegates common setup to the base class (binding parameter
// metadata from the context's BindManager), captures the HAVING expression and the
// context's aggregators, and resolves the serial-execution flag: isSerial is true only
// when a SERIAL hint is present AND ScanUtil allows serial execution; an unhonorable
// SERIAL hint produces a warning and is otherwise ignored.
private AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having, Expression dynamicFilter, QueryPlan dataPlan) throws SQLException { super(context, statement, table, projector, context.getBindManager().getParameterMetaData(), limit, offset, orderBy, groupBy, parallelIteratorFactory, dynamicFilter, dataPlan); this.having = having; this.aggregators = context.getAggregationManager().getAggregators(); boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context); if (hasSerialHint && !canBeExecutedSerially) { logger.warn("This query cannot be executed serially. Ignoring the hint"); } this.isSerial = hasSerialHint && canBeExecutedSerially; }
/**
 * Constructs an aggregate plan, wiring the HAVING expression and the context's
 * aggregators, and computing the serial-execution flag. The plan runs serially
 * only when the SERIAL hint is both present and legally applicable.
 */
private AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table,
        RowProjector projector, Integer limit, Integer offset, OrderBy orderBy,
        ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having,
        Expression dynamicFilter, QueryPlan dataPlan) throws SQLException {
    super(context, statement, table, projector, context.getBindManager().getParameterMetaData(),
            limit, offset, orderBy, groupBy, parallelIteratorFactory, dynamicFilter, dataPlan);
    this.having = having;
    this.aggregators = context.getAggregationManager().getAggregators();
    final boolean hinted = statement.getHint().hasHint(HintNode.Hint.SERIAL);
    final boolean eligible = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context);
    // A SERIAL hint on a query that cannot run serially is dropped, with a warning.
    if (hinted && !eligible) {
        logger.warn("This query cannot be executed serially. Ignoring the hint");
    }
    this.isSerial = hinted && eligible;
}
// Truncated constructor (the remainder of the body is not visible here). Passes the
// plan-derived state (context, table ref, group-by, order-by, statement hint, combined
// offset/limit from QueryUtil.getOffsetLimit) to the superclass, then retains the plan
// and scan references.
public BaseResultIterators(QueryPlan plan, Integer perScanLimit, Integer offset, ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException { super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint(), QueryUtil.getOffsetLimit(plan.getLimit(), plan.getOffset()), offset); this.plan = plan; this.scan = scan;
// Incomplete constructor view (ends mid-body, no closing brace in this chunk).
// Forwards plan-derived arguments to the superclass — note the statement hint and the
// QueryUtil.getOffsetLimit(limit, offset) combination — then saves the plan and scan.
public BaseResultIterators(QueryPlan plan, Integer perScanLimit, Integer offset, ParallelScanGrouper scanGrouper, Scan scan, Map<ImmutableBytesPtr,ServerCache> caches, QueryPlan dataPlan) throws SQLException { super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint(), QueryUtil.getOffsetLimit(plan.getLimit(), plan.getOffset()), offset); this.plan = plan; this.scan = scan;
// Client-side aggregate plan constructor. Captures group-by/having and the client
// aggregators, deserializes the server-side aggregators from the scan attribute
// (deserialization — not cloning — is what produces server-side instances; the existing
// Configuration is reused to avoid building a new one), and records whether the
// HASH_AGGREGATE hint was supplied.
public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { super(context, statement, table, projector, limit, offset, where, orderBy, delegate); this.groupBy = groupBy; this.having = having; this.clientAggregators = context.getAggregationManager().getAggregators(); // We must deserialize rather than clone based off of client aggregators because // upon deserialization we create the server-side aggregators instead of the client-side // aggregators. We use the Configuration directly here to avoid the expense of creating // another one. this.serverAggregators = ServerAggregators.deserialize(context.getScan() .getAttribute(BaseScannerRegionObserver.AGGREGATORS), context.getConnection().getQueryServices().getConfiguration(), null); // Extract hash aggregate hint, if any. HintNode hints = statement.getHint(); useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); }
// Fragment of a larger boolean condition (both ends lie outside this view): holds when the
// RANGE_SCAN hint is absent, `cols` is below the table's row-key field count, and the
// GROUP BY preserves ordering.
// NOTE(review): `cols` is declared outside this view — likely the count of row-key columns
// fixed by the scan; verify in the enclosing method before relying on this reading.
!plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && plan.getGroupBy().isOrderPreserving() &&