private PartitionIterator sendNextRequests() { List<PartitionIterator> concurrentQueries = new ArrayList<>(concurrencyFactor); for (int i = 0; i < concurrencyFactor && ranges.hasNext(); i++) { concurrentQueries.add(query(ranges.next(), i == 0)); ++rangesQueried; } Tracing.trace("Submitted {} concurrent range requests", concurrentQueries.size()); // We want to count the results for the sake of updating the concurrency factor (see updateConcurrencyFactor) but we don't want to // enforce any particular limit at this point (this could break code than rely on postReconciliationProcessing), hence the DataLimits.NONE. counter = DataLimits.NONE.newCounter(command.nowInSec(), true, command.selectsFullPartition(), enforceStrictLiveness); return counter.applyTo(PartitionIterators.concat(concurrentQueries)); }
// NOTE(review): fragment — enclosing method is not visible in this view.
// Fold what the per-query counter tallied into the running liveReturned total,
// then release the exhausted sub-query iterator.
liveReturned += counter.counted(); sentQueryIterator.close();
// NOTE(review): truncated fragment of a short-read-protection moreContents() —
// the method body's opening brace and continuation are missing from this view.
// Precondition: the merged (post-reconciliation) counter has not yet hit its
// per-partition limit, otherwise we would not be asked for more rows.
// If the per-source counter is not done for the partition either and the command
// imposes no per-partition limit, there is no short read to protect against.
public UnfilteredRowIterator moreContents() assert !mergedResultCounter.isDoneForPartition(); if (!singleResultCounter.isDoneForPartition() && command.limits().perPartitionCount() == DataLimits.NO_LIMIT) return null;
// NOTE(review): truncated fragment of a short-read-protection moreContents() —
// the method body's opening brace and continuation are missing from this view.
// Precondition: the merged (post-reconciliation) counter has not yet hit its
// per-partition limit, otherwise we would not be asked for more rows.
// If the per-source counter is not done for the partition either and the command
// imposes no per-partition limit, there is no short read to protect against.
public UnfilteredRowIterator moreContents() assert !mergedResultCounter.isDoneForPartition(); if (!singleResultCounter.isDoneForPartition() && command.limits().perPartitionCount() == DataLimits.NO_LIMIT) return null;
// NOTE(review): truncated fragment of a short-read-protection moreContents() —
// the method body's opening brace and continuation are missing from this view.
// Precondition: the merged (post-reconciliation) counter has not yet hit its
// per-partition limit, otherwise we would not be asked for more rows.
// If the per-source counter is not done for the partition either and the command
// imposes no per-partition limit, there is no short read to protect against.
public UnfilteredRowIterator moreContents() assert !mergedResultCounter.isDoneForPartition(); if (!singleResultCounter.isDoneForPartition() && command.limits().perPartitionCount() == DataLimits.NO_LIMIT) return null;
// NOTE(review): fragment — enclosing method is not visible in this view.
// Fold what the per-query counter tallied into the running liveReturned total,
// then release the exhausted sub-query iterator.
liveReturned += counter.counted(); sentQueryIterator.close();
// NOTE(review): fragment — enclosing method is not visible in this view.
// Fold what the per-query counter tallied into the running liveReturned total,
// then release the exhausted sub-query iterator.
liveReturned += counter.counted(); sentQueryIterator.close();
private UnfilteredPartitionIterator extendWithShortReadProtection(UnfilteredPartitionIterator partitions, InetAddress source, DataLimits.Counter mergedResultCounter) { DataLimits.Counter singleResultCounter = command.limits().newCounter(command.nowInSec(), false, command.selectsFullPartition(), enforceStrictLiveness).onlyCount(); ShortReadPartitionsProtection protection = new ShortReadPartitionsProtection(source, singleResultCounter, mergedResultCounter, queryStartNanoTime); /* * The order of extention and transformations is important here. Extending with more partitions has to happen * first due to the way BaseIterator.hasMoreContents() works: only transformations applied after extension will * be called on the first partition of the extended iterator. * * Additionally, we want singleResultCounter to be applied after SRPP, so that its applyToPartition() method will * be called last, after the extension done by SRRP.applyToPartition() call. That way we preserve the same order * when it comes to calling SRRP.moreContents() and applyToRow() callbacks. * * See ShortReadPartitionsProtection.applyToPartition() for more details. */ // extend with moreContents() only if it's a range read command with no partition key specified if (!command.isLimitedToOnePartition()) partitions = MorePartitions.extend(partitions, protection); // register SRPP.moreContents() partitions = Transformation.apply(partitions, protection); // register SRPP.applyToPartition() partitions = Transformation.apply(partitions, singleResultCounter); // register the per-source counter return partitions; }
// Wraps the replica's response with short-read protection and a per-source counter.
private UnfilteredPartitionIterator extendWithShortReadProtection(UnfilteredPartitionIterator partitions, InetAddress source, DataLimits.Counter mergedResultCounter) { DataLimits.Counter singleResultCounter = command.limits().newCounter(command.nowInSec(), false, command.selectsFullPartition(), enforceStrictLiveness).onlyCount(); ShortReadPartitionsProtection protection = new ShortReadPartitionsProtection(source, singleResultCounter, mergedResultCounter, queryStartNanoTime); /* * The order of extension and transformations is important here. Extending with more partitions has to happen * first due to the way BaseIterator.hasMoreContents() works: only transformations applied after extension will * be called on the first partition of the extended iterator. * * Additionally, we want singleResultCounter to be applied after SRPP, so that its applyToPartition() method will * be called last, after the extension done by SRPP.applyToPartition() call. That way we preserve the same order * when it comes to calling SRPP.moreContents() and applyToRow() callbacks. * * See ShortReadPartitionsProtection.applyToPartition() for more details. */ // extend with moreContents() only if it's a range read command with no partition key specified if (!command.isLimitedToOnePartition()) partitions = MorePartitions.extend(partitions, protection); // register SRPP.moreContents() partitions = Transformation.apply(partitions, protection); // register SRPP.applyToPartition() partitions = Transformation.apply(partitions, singleResultCounter); // register the per-source counter return partitions; }
// Wraps the replica's response with short-read protection and a per-source counter.
private UnfilteredPartitionIterator extendWithShortReadProtection(UnfilteredPartitionIterator partitions, InetAddress source, DataLimits.Counter mergedResultCounter) { DataLimits.Counter singleResultCounter = command.limits().newCounter(command.nowInSec(), false, command.selectsFullPartition(), enforceStrictLiveness).onlyCount(); ShortReadPartitionsProtection protection = new ShortReadPartitionsProtection(source, singleResultCounter, mergedResultCounter, queryStartNanoTime); /* * The order of extension and transformations is important here. Extending with more partitions has to happen * first due to the way BaseIterator.hasMoreContents() works: only transformations applied after extension will * be called on the first partition of the extended iterator. * * Additionally, we want singleResultCounter to be applied after SRPP, so that its applyToPartition() method will * be called last, after the extension done by SRPP.applyToPartition() call. That way we preserve the same order * when it comes to calling SRPP.moreContents() and applyToRow() callbacks. * * See ShortReadPartitionsProtection.applyToPartition() for more details. */ // extend with moreContents() only if it's a range read command with no partition key specified if (!command.isLimitedToOnePartition()) partitions = MorePartitions.extend(partitions, protection); // register SRPP.moreContents() partitions = Transformation.apply(partitions, protection); // register SRPP.applyToPartition() partitions = Transformation.apply(partitions, singleResultCounter); // register the per-source counter return partitions; }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of cells that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of non-expiring live cells is greater than the number of cells asked (we then // know we have enough live cells). // - The number of cells cached is less than requested, in which case we know we won't have enough. if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) return true; if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of row that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of rows with at least one non-expiring cell is greater than what we ask, // in which case we know we have enough live. // - The number of rows is less than requested, in which case we know we won't have enough. if (cached.rowsWithNonExpiringCells() >= rowLimit) return true; if (cached.rowCount() < rowLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of row that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of rows with at least one non-expiring cell is greater than what we ask, // in which case we know we have enough live. // - The number of rows is less than requested, in which case we know we won't have enough. if (cached.rowsWithNonExpiringCells() >= rowLimit) return true; if (cached.rowCount() < rowLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of cells that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of non-expiring live cells is greater than the number of cells asked (we then // know we have enough live cells). // - The number of cells cached is less than requested, in which case we know we won't have enough. if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) return true; if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }