private int getScannerCacheSize() { try { return plan.getContext().getStatement().getFetchSize(); } catch (Throwable e) { Throwables.propagate(e); } return -1; // unreachable }
/**
 * Determines whether the underlying scanners may be consumed in a round-robin
 * fashion. Round robin is viable only when no row ordering is required at all
 * (not even row key order) and the statement's fetch size exceeds one.
 */
public static boolean isRoundRobinPossible(OrderBy orderBy, StatementContext context)
        throws SQLException {
    int fetchSize = context.getStatement().getFetchSize();
    if (fetchSize <= 1) {
        // No point round-robining scanners when fetching a single row at a time.
        return false;
    }
    if (shouldRowsBeInRowKeyOrder(orderBy, context)) {
        return false;
    }
    return orderBy.getOrderByExpressions().isEmpty();
}
public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, StatementContext ctx) throws SQLException { this.rowProjector = rowProjector; this.scanner = resultIterator; this.context = ctx; this.statement = context.getStatement(); this.readMetricsQueue = context.getReadMetricsQueue(); this.overAllQueryMetrics = context.getOverallQueryMetrics(); this.queryLogger = context.getQueryLogger() != null ? context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE; }
/**
 * Executes the scan in parallel across all regions, blocking until all scans are complete.
 * @return the result iterators for the scan of each region
 */
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
                ScanUtil.getCustomAnnotations(scan)));
    }
    boolean isReverse = ScanUtil.isReversed(scan);
    boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;
    final ConnectionQueryServices services = context.getConnection().getQueryServices();
    // Get query time out from Statement; the deadline is absolute wall-clock time.
    final long startTime = EnvironmentEdgeManager.currentTimeMillis();
    final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis();
    int numScans = size();
    // Capture all iterators so that if something goes wrong, we close them all
    // The iterators list is based on the submission of work, so it may not
    // contain them all (for example if work was rejected from the queue)
    Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
    List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numScans);
    ScanWrapper previousScan = new ScanWrapper(null);
    // Delegate to the overload that performs the actual parallel submission; the
    // final argument caps hash-join cache retry attempts from configuration.
    return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse,
            maxQueryEndTime, splits.size(), previousScan,
            context.getConnection().getQueryServices().getConfiguration()
                    .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES,
                            QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES));
}
// Resolve the table chosen by the optimizer, plus the statement's connection
// and tenant, for use by the code that follows (fragment — enclosing method
// not fully visible here).
TableRef tableRef = bestPlan.getTableRef();
PTable table = tableRef.getTable();
PhoenixStatement statement = context.getStatement();
PhoenixConnection connection = statement.getConnection();
PName tenantId = connection.getTenantId();
// Compile the join under each candidate strategy, giving each attempt a fresh
// StatementContext with a new Scan so one strategy's compilation does not
// leak scan state into the next (fragment — loop body continues beyond view).
for (JoinCompiler.Strategy strategy : strategies) {
    StatementContext newContext = new StatementContext(
            context.getStatement(), context.getResolver(), new Scan(),
            context.getSequenceManager());
    QueryPlan plan = compileJoinQuery(
            strategy, newContext, binds, joinTable, asSubquery, projectPKColumns, orderBy);
// Accumulator for the first SQLException encountered; presumably rethrown by a
// finally/cleanup section beyond this view — confirm against the full method.
SQLException toThrow = null;
final HashCacheClient hashCacheClient = new HashCacheClient(context.getConnection());
int queryTimeOut = context.getStatement().getQueryTimeoutInMillis();
try {
    // Submit the per-region scan work (fragment — try block continues beyond view).
    submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper);
private int getScannerCacheSize() { try { return plan.getContext().getStatement().getFetchSize(); } catch (Throwable e) { Throwables.propagate(e); } return -1; // unreachable }
private int getScannerCacheSize() { try { return plan.getContext().getStatement().getFetchSize(); } catch (Throwable e) { Throwables.propagate(e); } return -1; // unreachable }
int scannerCacheSize = context.getStatement().getFetchSize();
// When the limit is an exact multiple of the cache size, fetch one extra row per
// round trip — presumably to avoid an empty final RPC just to detect scan end.
// NOTE(review): if fetchSize is 0 (the JDBC default meaning "driver decides"),
// the modulo below throws ArithmeticException — confirm callers guarantee > 0.
if (limit != null && limit % scannerCacheSize == 0) {
    scan.setCaching(scannerCacheSize + 1);
/**
 * Determines whether the underlying scanners may be consumed in a round-robin
 * fashion. Round robin is viable only when no row ordering is required at all
 * (not even row key order) and the statement's fetch size exceeds one.
 */
public static boolean isRoundRobinPossible(OrderBy orderBy, StatementContext context)
        throws SQLException {
    int fetchSize = context.getStatement().getFetchSize();
    if (fetchSize <= 1) {
        // No point round-robining scanners when fetching a single row at a time.
        return false;
    }
    if (shouldRowsBeInRowKeyOrder(orderBy, context)) {
        return false;
    }
    return orderBy.getOrderByExpressions().isEmpty();
}
/**
 * Determines whether the underlying scanners may be consumed in a round-robin
 * fashion. Round robin is viable only when no row ordering is required at all
 * (not even row key order) and the statement's fetch size exceeds one.
 */
public static boolean isRoundRobinPossible(OrderBy orderBy, StatementContext context)
        throws SQLException {
    int fetchSize = context.getStatement().getFetchSize();
    if (fetchSize <= 1) {
        // No point round-robining scanners when fetching a single row at a time.
        return false;
    }
    if (shouldRowsBeInRowKeyOrder(orderBy, context)) {
        return false;
    }
    return orderBy.getOrderByExpressions().isEmpty();
}
/**
 * Runs an UPSERT SELECT by consuming {@code iterator} rows projected through
 * {@code projector} and writing them into {@code tableRef}
 * (fragment — method body continues beyond this view).
 */
public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef,
        RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues)
        throws SQLException {
    // Pull statement, connection, and query services from the child context.
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, StatementContext ctx) throws SQLException { this.rowProjector = rowProjector; this.scanner = resultIterator; this.context = ctx; this.statement = context.getStatement(); this.readMetricsQueue = context.getReadMetricsQueue(); this.overAllQueryMetrics = context.getOverallQueryMetrics(); this.queryLogger = context.getQueryLogger() != null ? context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE; }
public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, StatementContext ctx) throws SQLException { this.rowProjector = rowProjector; this.scanner = resultIterator; this.context = ctx; this.statement = context.getStatement(); this.readMetricsQueue = context.getReadMetricsQueue(); this.overAllQueryMetrics = context.getOverallQueryMetrics(); this.queryLogger = context.getQueryLogger() != null ? context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE; }
/**
 * Executes the scan in parallel across all regions, blocking until all scans are complete.
 * @return the result iterators for the scan of each region
 */
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
                ScanUtil.getCustomAnnotations(scan)));
    }
    boolean isReverse = ScanUtil.isReversed(scan);
    boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;
    final ConnectionQueryServices services = context.getConnection().getQueryServices();
    // Get query time out from Statement; the deadline is absolute wall-clock time.
    final long startTime = EnvironmentEdgeManager.currentTimeMillis();
    final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis();
    int numScans = size();
    // Capture all iterators so that if something goes wrong, we close them all
    // The iterators list is based on the submission of work, so it may not
    // contain them all (for example if work was rejected from the queue)
    Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
    List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numScans);
    ScanWrapper previousScan = new ScanWrapper(null);
    // Delegate to the overload that performs the actual parallel submission; the
    // final argument caps hash-join cache retry attempts from configuration.
    return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse,
            maxQueryEndTime, splits.size(), previousScan,
            context.getConnection().getQueryServices().getConfiguration()
                    .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES,
                            QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES));
}
/**
 * Executes the scan in parallel across all regions, blocking until all scans are complete.
 * @return the result iterators for the scan of each region
 */
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
                ScanUtil.getCustomAnnotations(scan)));
    }
    boolean isReverse = ScanUtil.isReversed(scan);
    boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;
    final ConnectionQueryServices services = context.getConnection().getQueryServices();
    // Get query time out from Statement; the deadline is absolute wall-clock time.
    final long startTime = EnvironmentEdgeManager.currentTimeMillis();
    final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis();
    int numScans = size();
    // Capture all iterators so that if something goes wrong, we close them all
    // The iterators list is based on the submission of work, so it may not
    // contain them all (for example if work was rejected from the queue)
    Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
    List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numScans);
    ScanWrapper previousScan = new ScanWrapper(null);
    // Delegate to the overload that performs the actual parallel submission; the
    // final argument caps hash-join cache retry attempts from configuration.
    return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse,
            maxQueryEndTime, splits.size(), previousScan,
            context.getConnection().getQueryServices().getConfiguration()
                    .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES,
                            QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES));
}
/**
 * Enforces the "force index" policy: when {@code PHOENIX_FORCE_INDEX} is enabled,
 * a plain SELECT (no NO_INDEX hint, no join, no union) whose filters touch no
 * primary-key column must be served by an index; otherwise a
 * {@link ForceIndexException} is thrown.
 */
private static void forceIndexCheck(QueryPlan plan) throws SQLException {
    PhoenixStatement statement = plan.getContext().getStatement();
    ReadOnlyProps props = statement.getConnection().getQueryServices().getProps();
    StatementContext context = plan.getContext();
    // EXPLAIN plans are exempt from the policy.
    if (plan.getProjector() == EXPLAIN_PLAN_ROW_PROJECTOR_WITH_BYTE_ROW_ESTIMATES) {
        return;
    }
    // Nothing to enforce when the query has no WHERE-clause columns.
    if (context.getWhereConditionColumns().isEmpty()) {
        return;
    }
    // Feature flag off — do nothing.
    if (!props.getBoolean(PHOENIX_FORCE_INDEX, DEFAULT_PHOENIX_FORCE_INDEX)) {
        return;
    }
    SelectStatement select = (SelectStatement) plan.getStatement();
    boolean plainSelect = !select.getHint().hasHint(Hint.NO_INDEX)
            && !select.isJoin()
            && !select.isUnion();
    if (plainSelect && context.getFilterConditionsPKColumns().isEmpty()) {
        PTable table = plan.getTableRef().getTable();
        String schema =
                (table.getSchemaName() == null) ? null : table.getSchemaName().getString();
        throw new ForceIndexException(schema, table.getTableName().toString(),
                "The filters must contain at least one index column.");
    }
}
// Compile the join under each candidate strategy, giving each attempt a fresh
// StatementContext with a new Scan so one strategy's compilation does not
// leak scan state into the next (fragment — loop body continues beyond view).
for (JoinCompiler.Strategy strategy : strategies) {
    StatementContext newContext = new StatementContext(
            context.getStatement(), context.getResolver(), new Scan(),
            context.getSequenceManager());
    QueryPlan plan = compileJoinQuery(
            strategy, newContext, binds, joinTable, asSubquery, projectPKColumns, orderBy);
// Compile the join under each candidate strategy, giving each attempt a fresh
// StatementContext with a new Scan so one strategy's compilation does not
// leak scan state into the next (fragment — loop body continues beyond view).
for (JoinCompiler.Strategy strategy : strategies) {
    StatementContext newContext = new StatementContext(
            context.getStatement(), context.getResolver(), new Scan(),
            context.getSequenceManager());
    QueryPlan plan = compileJoinQuery(
            strategy, newContext, binds, joinTable, asSubquery, projectPKColumns, orderBy);