/**
 * Reports whether the underlying data ran out before this limit was reached:
 * strictly fewer rows were tallied than the configured count.
 *
 * @param counter the counter tracking rows actually returned
 * @return true when the counted rows fall short of {@code count()}
 */
public boolean isExhausted(Counter counter)
{
    return count() > counter.counted();
}
/**
 * Checks whether the data source was exhausted: if strictly fewer rows were
 * counted than this limit requested, the source ran out of results before
 * the limit was hit.
 *
 * @param counter the counter that tallied the rows actually returned
 * @return true when fewer rows were counted than {@code count()} allows
 */
public boolean isExhausted(Counter counter)
{
    return counter.counted() < count();
}
// True when the rows tallied so far fall short of the configured limit,
// i.e. the source produced fewer rows than the limit asked for.
public boolean isExhausted(Counter counter)
{
    int seen = counter.counted();
    return seen < count();
}
/**
 * Recomputes how many token ranges to query concurrently in the next batch,
 * extrapolating from the live rows produced by the ranges queried so far.
 */
private void updateConcurrencyFactor()
{
    if (liveReturned == 0)
    {
        // we haven't actually gotten any results, so query all remaining ranges at once
        concurrencyFactor = totalRangeCount - rangesQueried;
        return;
    }

    // Otherwise, compute how many rows per range we got on average and pick a concurrency factor
    // that should allow us to fetch all remaining rows with the next batch of (concurrent) queries.
    int remainingRows = command.limits().count() - liveReturned;
    float rowsPerRange = (float)liveReturned / (float)rangesQueried;
    // Clamp to [1, ranges left] so we always make progress without querying
    // more ranges than actually remain.
    concurrencyFactor = Math.max(1, Math.min(totalRangeCount - rangesQueried, Math.round(remainingRows / rowsPerRange)));
    logger.trace("Didn't get enough response rows; actual rows per range: {}; remaining rows: {}, new concurrent requests: {}",
                 rowsPerRange, remainingRows, concurrencyFactor);
}
private void updateConcurrencyFactor() { if (liveReturned == 0) { // we haven't actually gotten any results, so query all remaining ranges at once concurrencyFactor = totalRangeCount - rangesQueried; return; } // Otherwise, compute how many rows per range we got on average and pick a concurrency factor // that should allow us to fetch all remaining rows with the next batch of (concurrent) queries. int remainingRows = command.limits().count() - liveReturned; float rowsPerRange = (float)liveReturned / (float)rangesQueried; concurrencyFactor = Math.max(1, Math.min(totalRangeCount - rangesQueried, Math.round(remainingRows / rowsPerRange))); logger.trace("Didn't get enough response rows; actual rows per range: {}; remaining rows: {}, new concurrent requests: {}", rowsPerRange, remainingRows, concurrencyFactor); }
/**
 * Picks the concurrency factor (number of ranges queried in parallel) for the
 * next round of sub-range queries, using the observed rows-per-range so far.
 */
private void updateConcurrencyFactor()
{
    if (liveReturned == 0)
    {
        // we haven't actually gotten any results, so query all remaining ranges at once
        concurrencyFactor = totalRangeCount - rangesQueried;
        return;
    }

    // Otherwise, compute how many rows per range we got on average and pick a concurrency factor
    // that should allow us to fetch all remaining rows with the next batch of (concurrent) queries.
    int remainingRows = command.limits().count() - liveReturned;
    float rowsPerRange = (float)liveReturned / (float)rangesQueried;
    // At least 1 so we keep making progress; at most the count of ranges still unqueried.
    concurrencyFactor = Math.max(1, Math.min(totalRangeCount - rangesQueried, Math.round(remainingRows / rowsPerRange)));
    logger.trace("Didn't get enough response rows; actual rows per range: {}; remaining rows: {}, new concurrent requests: {}",
                 rowsPerRange, remainingRows, concurrencyFactor);
}
/**
 * Creates a pager for {@code command}, seeding the remaining-row budgets
 * from the command's limits.
 *
 * @param command         the read command being paged
 * @param protocolVersion native protocol version used for paging state (de)serialization
 */
protected AbstractQueryPager(ReadCommand command, ProtocolVersion protocolVersion)
{
    this.protocolVersion = protocolVersion;
    this.command = command;
    this.limits = command.limits();
    this.enforceStrictLiveness = command.metadata().enforceStrictLiveness();

    // Start with the full budget allowed by the limits; consumed as pages are fetched.
    this.remaining = this.limits.count();
    this.remainingInPartition = this.limits.perPartitionCount();
}
/**
 * Builds a pager over {@code command}, initializing the per-query and
 * per-partition row budgets from the command's limits.
 *
 * @param command         the read command being paged
 * @param protocolVersion native protocol version in effect for this query
 */
protected AbstractQueryPager(ReadCommand command, ProtocolVersion protocolVersion)
{
    this.command = command;
    this.protocolVersion = protocolVersion;
    this.limits = command.limits();
    this.enforceStrictLiveness = command.metadata().enforceStrictLiveness();

    // Full budgets up front; these track how many rows may still be returned.
    this.remaining = limits.count();
    this.remainingInPartition = limits.perPartitionCount();
}
// Initializes paging state: the limits come straight from the command, and the
// remaining/remainingInPartition budgets start at their configured maxima.
protected AbstractQueryPager(ReadCommand command, ProtocolVersion protocolVersion)
{
    this.command = command;
    this.limits = command.limits();
    this.remaining = this.limits.count();
    this.remainingInPartition = this.limits.perPartitionCount();
    this.enforceStrictLiveness = command.metadata().enforceStrictLiveness();
    this.protocolVersion = protocolVersion;
}
/**
 * Executes a partition-range read across the cluster, choosing how many token
 * ranges to query concurrently from an estimate of result rows per range.
 *
 * @param command          the range read to execute
 * @param consistencyLevel consistency level the read must satisfy
 * @param queryStartNanoTime start timestamp used for timeout accounting
 * @return an iterator over the (limit-filtered, post-processed) result partitions
 */
@SuppressWarnings("resource")
public static PartitionIterator getRangeSlice(PartitionRangeReadCommand command,
                                              ConsistencyLevel consistencyLevel,
                                              long queryStartNanoTime)
{
    Tracing.trace("Computing ranges to query");

    Keyspace keyspace = Keyspace.open(command.metadata().ksName);
    RangeIterator ranges = new RangeIterator(command, keyspace, consistencyLevel);

    // our estimate of how many result rows there will be per-range
    float resultsPerRange = estimateResultsPerRange(command, keyspace);
    // underestimate how many rows we will get per-range in order to increase the likelihood that we'll
    // fetch enough rows in the first round
    resultsPerRange -= resultsPerRange * CONCURRENT_SUBREQUESTS_MARGIN;
    // Guard against a zero estimate (would divide by zero); otherwise clamp the
    // concurrency to [1, total range count].
    int concurrencyFactor = resultsPerRange == 0.0
                          ? 1
                          : Math.max(1, Math.min(ranges.rangeCount(), (int) Math.ceil(command.limits().count() / resultsPerRange)));
    logger.trace("Estimated result rows per range: {}; requested rows: {}, ranges.size(): {}; concurrent range requests: {}",
                 resultsPerRange, command.limits().count(), ranges.rangeCount(), concurrencyFactor);
    Tracing.trace("Submitting range requests on {} ranges with a concurrency of {} ({} rows per range expected)",
                  ranges.rangeCount(), concurrencyFactor, resultsPerRange);

    // Note that in general, a RangeCommandIterator will honor the command limit for each range, but will not enforce it globally.
    return command.limits().filter(command.postReconciliationProcessing(new RangeCommandIterator(ranges, command, concurrencyFactor, keyspace, consistencyLevel, queryStartNanoTime)),
                                   command.nowInSec(),
                                   command.selectsFullPartition(),
                                   command.metadata().enforceStrictLiveness());
}
private long serializedSliceCommandSize(SinglePartitionReadCommand command) { CFMetaData metadata = command.metadata(); ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter(); Slices slices = filter.requestedSlices(); boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING); long size = serializedSlicesSize(slices, makeStaticSlice, metadata); size += TypeSizes.sizeof(command.clusteringIndexFilter().isReversed()); size += TypeSizes.sizeof(command.limits().count()); return size + TypeSizes.sizeof(0); // compositesToGroup }
/**
 * Returns the number of bytes the legacy slice-command serialization of
 * {@code command} will occupy: slices + reversed flag + count limit +
 * compositesToGroup. Must stay in sync with the matching serialize method.
 */
private long serializedSliceCommandSize(SinglePartitionReadCommand command)
{
    CFMetaData metadata = command.metadata();
    ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter();

    Slices slices = filter.requestedSlices();
    // Extra static slice is serialized when statics are fetched but not
    // already selected by the requested slices.
    boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING);

    long size = serializedSlicesSize(slices, makeStaticSlice, metadata);
    size += TypeSizes.sizeof(command.clusteringIndexFilter().isReversed());
    size += TypeSizes.sizeof(command.limits().count());
    return size + TypeSizes.sizeof(0);  // compositesToGroup
}
private long serializedSliceCommandSize(SinglePartitionReadCommand command) { CFMetaData metadata = command.metadata(); ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter(); Slices slices = filter.requestedSlices(); boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING); long size = serializedSlicesSize(slices, makeStaticSlice, metadata); size += TypeSizes.sizeof(command.clusteringIndexFilter().isReversed()); size += TypeSizes.sizeof(command.limits().count()); return size + TypeSizes.sizeof(0); // compositesToGroup }
/**
 * Executes this SELECT locally (no coordination with other nodes), paging the
 * result when an aggregation is present or the limit exceeds the page size.
 *
 * @param state              client/query state for this request
 * @param options            bound values, page size, paging state, etc.
 * @param nowInSec           query timestamp in seconds, used for liveness decisions
 * @param queryStartNanoTime start timestamp used for timeout accounting
 * @return the result rows for this (possibly single-page) execution
 */
public ResultMessage.Rows executeInternal(QueryState state, QueryOptions options, int nowInSec, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    try (ReadExecutionController executionController = query.executionController())
    {
        // Paging is skipped when there is no aggregation and the result is
        // known to fit in one page (no page size, or limit <= page size).
        if (aggregationSpec == null && (pageSize <= 0 || (query.limits().count() <= pageSize)))
        {
            try (PartitionIterator data = query.executeInternal(executionController))
            {
                return processResults(data, options, nowInSec, userLimit);
            }
        }
        else
        {
            QueryPager pager = getPager(query, options);
            return execute(Pager.forInternalQuery(pager, executionController), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
        }
    }
}
/**
 * Runs this SELECT against local data only, choosing between a direct
 * single-page execution and the internal pager.
 *
 * @param state              client/query state for this request
 * @param options            bound values, page size, paging state, etc.
 * @param nowInSec           query timestamp in seconds
 * @param queryStartNanoTime start timestamp used for timeout accounting
 * @return the resulting rows
 */
public ResultMessage.Rows executeInternal(QueryState state, QueryOptions options, int nowInSec, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    // Paging can be skipped when there is no aggregation and the whole result
    // is known to fit in a single page.
    boolean singlePage = aggregationSpec == null
                      && (pageSize <= 0 || query.limits().count() <= pageSize);

    try (ReadExecutionController executionController = query.executionController())
    {
        if (singlePage)
        {
            try (PartitionIterator data = query.executeInternal(executionController))
            {
                return processResults(data, options, nowInSec, userLimit);
            }
        }

        QueryPager pager = getPager(query, options);
        return execute(Pager.forInternalQuery(pager, executionController),
                       options, pageSize, nowInSec, userLimit, queryStartNanoTime);
    }
}
/**
 * Local (non-distributed) execution path for this SELECT. Uses the pager when
 * an aggregation is specified or the query limit exceeds the page size;
 * otherwise executes in a single shot.
 */
public ResultMessage.Rows executeInternal(QueryState state, QueryOptions options, int nowInSec, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    try (ReadExecutionController executionController = query.executionController())
    {
        if (aggregationSpec == null && (pageSize <= 0 || (query.limits().count() <= pageSize)))
        {
            // Single-page fast path: results are consumed directly.
            try (PartitionIterator data = query.executeInternal(executionController))
            {
                return processResults(data, options, nowInSec, userLimit);
            }
        }
        else
        {
            // Paged path: wrap the query in an internal pager bound to this controller.
            QueryPager pager = getPager(query, options);
            return execute(Pager.forInternalQuery(pager, executionController), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
        }
    }
}
/**
 * Executes this SELECT as a distributed read at the consistency level carried
 * by {@code options}, paging the result when an aggregation is present or the
 * limit exceeds the page size.
 *
 * @param state              client state (used for authorization of paged queries)
 * @param options            bound values, consistency level, page size, paging state
 * @param queryStartNanoTime start timestamp used for timeout accounting
 * @return the result rows
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel cl = options.getConsistency();
    checkNotNull(cl, "Invalid empty consistency level");
    cl.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    // No aggregation and the result fits in one page: execute in a single shot.
    if (aggregationSpec == null && (pageSize <= 0 || (query.limits().count() <= pageSize)))
        return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);

    QueryPager pager = getPager(query, options);
    return execute(Pager.forDistributedQuery(pager, cl, state.getClientState()), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
}
/**
 * Distributed execution of this SELECT at the requested consistency level,
 * delegating to the distributed pager when the result cannot be served in a
 * single page or an aggregation is involved.
 *
 * @param state              client state for this request
 * @param options            bound values, consistency level, page size, paging state
 * @param queryStartNanoTime start timestamp used for timeout accounting
 * @return the result rows
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel cl = options.getConsistency();
    checkNotNull(cl, "Invalid empty consistency level");
    cl.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    // A query fits in one page when paging is disabled or the limit is no
    // larger than the page size.
    boolean fitsInOnePage = pageSize <= 0 || query.limits().count() <= pageSize;
    if (aggregationSpec == null && fitsInOnePage)
        return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);

    QueryPager pager = getPager(query, options);
    return execute(Pager.forDistributedQuery(pager, cl, state.getClientState()),
                   options, pageSize, nowInSec, userLimit, queryStartNanoTime);
}
/**
 * Distributed read path for this SELECT: validates the consistency level,
 * builds the read query from the user's limits and page size, then either
 * executes directly (single page, no aggregation) or through the pager.
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel cl = options.getConsistency();
    checkNotNull(cl, "Invalid empty consistency level");
    cl.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    if (aggregationSpec == null && (pageSize <= 0 || (query.limits().count() <= pageSize)))
        return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);

    // Paged path: the pager enforces the page size across successive fetches.
    QueryPager pager = getPager(query, options);
    return execute(Pager.forDistributedQuery(pager, cl, state.getClientState()), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
}
/**
 * Writes the legacy slice-command wire form of {@code command} to {@code out}.
 * Field order is part of the wire format and must match the corresponding
 * size/deserialize methods: slices, reversed flag, count limit, compositesToGroup.
 *
 * @param command the single-partition slice read to serialize
 * @param out     destination stream
 * @throws IOException on write failure
 */
private void serializeSliceCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException
{
    CFMetaData metadata = command.metadata();
    ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter();

    Slices slices = filter.requestedSlices();
    // Add a synthetic static slice when statics are fetched but no requested
    // slice already selects the static clustering.
    boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING);
    serializeSlices(out, slices, filter.isReversed(), makeStaticSlice, metadata);

    out.writeBoolean(filter.isReversed());

    boolean selectsStatics = !command.columnFilter().fetchedColumns().statics.isEmpty() || slices.selects(Clustering.STATIC_CLUSTERING);
    DataLimits limits = command.limits();
    if (limits.isDistinct())
        out.writeInt(1);  // the limit is always 1 for DISTINCT queries
    else
        out.writeInt(updateLimitForQuery(command.limits().count(), filter.requestedSlices()));

    int compositesToGroup;
    if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT || metadata.isDense())
        compositesToGroup = -1;
    else if (limits.isDistinct() && !selectsStatics)
        compositesToGroup = -2;  // for DISTINCT queries (CASSANDRA-8490)
    else
        compositesToGroup = metadata.clusteringColumns().size();

    out.writeInt(compositesToGroup);
}