public boolean isFullyCoveredBy(CachedPartition partition) { // Partition is guaranteed to cover the whole filter if it includes the filter start and finish bounds. // (note that since partition is the head of a partition, to have no lower bound is ok) if (!slices.hasUpperBound() || partition.isEmpty()) return false; return partition.metadata().comparator.compare(slices.get(slices.size() - 1).end(), partition.lastRow().clustering()) <= 0; }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of cells that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of non-expiring live cells is greater than the number of cells asked (we then // know we have enough live cells). // - The number of cells cached is less than requested, in which case we know we won't have enough. if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) return true; if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of row that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of rows with at least one non-expiring cell is greater than what we ask, // in which case we know we have enough live. // - The number of rows is less than requested, in which case we know we won't have enough. if (cached.rowsWithNonExpiringCells() >= rowLimit) return true; if (cached.rowCount() < rowLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean isFullyCoveredBy(CachedPartition partition) { if (partition.isEmpty()) return false; // 'partition' contains all columns, so it covers our filter if our last clusterings // is smaller than the last in the cache return clusterings.comparator().compare(clusterings.last(), partition.lastRow().clustering()) <= 0; }
public void serialize(CachedPartition partition, DataOutputPlus out) throws IOException
{
    // On-disk/wire layout: the five cached statistics, then the table metadata, then the rows.
    int version = MessagingService.current_version;

    assert partition instanceof CachedBTreePartition;
    CachedBTreePartition btreePartition = (CachedBTreePartition) partition;

    out.writeInt(btreePartition.createdAtInSec);
    out.writeInt(btreePartition.cachedLiveRows);
    out.writeInt(btreePartition.rowsWithNonExpiringCells);
    out.writeInt(btreePartition.nonTombstoneCellCount);
    out.writeInt(btreePartition.nonExpiringLiveCells);

    CFMetaData.serializer.serialize(btreePartition.metadata(), out, version);
    try (UnfilteredRowIterator rows = btreePartition.unfilteredIterator())
    {
        UnfilteredRowIteratorSerializer.serializer.serialize(rows, null, out, version, btreePartition.rowCount());
    }
}
public boolean isFilterFullyCoveredBy(ClusteringIndexFilter filter, DataLimits limits, CachedPartition cached, int nowInSec)
{
    // We can use the cached value only if we know that no data it doesn't contain could be covered
    // by the query filter, that is if:
    //   1) either the whole partition is cached
    //   2) or we can ensure that any data the filter selects is in the cached partition
    //
    // We can guarantee that a partition is fully cached if the number of rows it contains is less than
    // what we're caching. When doing that, we should be careful about expiring cells: we could count
    // something expired that wasn't when the partition was cached, or we could decide that the whole
    // partition is cached when it's not. This is why we use CachedPartition#cachedLiveRows.
    if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
        return true;

    // If the whole partition isn't cached, then we must guarantee that the filter cannot select data that
    // is not in the cache. We can guarantee that if either the filter is a "head filter" and the cached
    // partition has more live rows than queried (where live rows refers to the rows that are live now),
    // or if we can prove that everything the filter selects is in the cached partition based on its content.
    return (filter.isHeadFilter() && limits.hasEnoughLiveData(cached, nowInSec, filter.selectsAllPartition(), metadata.enforceStrictLiveness())) || filter.isFullyCoveredBy(cached);
}
public boolean isFullyCoveredBy(CachedPartition partition) { if (partition.isEmpty()) return false; // 'partition' contains all columns, so it covers our filter if our last clusterings // is smaller than the last in the cache return clusterings.comparator().compare(clusterings.last(), partition.lastRow().clustering()) <= 0; }
public void serialize(CachedPartition partition, DataOutputPlus out) throws IOException
{
    // On-disk/wire layout: the five cached statistics, then the table metadata, then the rows.
    int version = MessagingService.current_version;

    assert partition instanceof CachedBTreePartition;
    CachedBTreePartition btreePartition = (CachedBTreePartition) partition;

    out.writeInt(btreePartition.createdAtInSec);
    out.writeInt(btreePartition.cachedLiveRows);
    out.writeInt(btreePartition.rowsWithNonExpiringCells);
    out.writeInt(btreePartition.nonTombstoneCellCount);
    out.writeInt(btreePartition.nonExpiringLiveCells);

    CFMetaData.serializer.serialize(btreePartition.metadata(), out, version);
    try (UnfilteredRowIterator rows = btreePartition.unfilteredIterator())
    {
        UnfilteredRowIteratorSerializer.serializer.serialize(rows, null, out, version, btreePartition.rowCount());
    }
}
public boolean isFilterFullyCoveredBy(ClusteringIndexFilter filter, DataLimits limits, CachedPartition cached, int nowInSec)
{
    // We can use the cached value only if we know that no data it doesn't contain could be covered
    // by the query filter, that is if:
    //   1) either the whole partition is cached
    //   2) or we can ensure that any data the filter selects is in the cached partition
    //
    // We can guarantee that a partition is fully cached if the number of rows it contains is less than
    // what we're caching. When doing that, we should be careful about expiring cells: we could count
    // something expired that wasn't when the partition was cached, or we could decide that the whole
    // partition is cached when it's not. This is why we use CachedPartition#cachedLiveRows.
    if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
        return true;

    // If the whole partition isn't cached, then we must guarantee that the filter cannot select data that
    // is not in the cache. We can guarantee that if either the filter is a "head filter" and the cached
    // partition has more live rows than queried (where live rows refers to the rows that are live now),
    // or if we can prove that everything the filter selects is in the cached partition based on its content.
    return (filter.isHeadFilter() && limits.hasEnoughLiveData(cached, nowInSec, filter.selectsAllPartition(), metadata.enforceStrictLiveness())) || filter.isFullyCoveredBy(cached);
}
public boolean isFullyCoveredBy(CachedPartition partition) { // Partition is guaranteed to cover the whole filter if it includes the filter start and finish bounds. // (note that since partition is the head of a partition, to have no lower bound is ok) if (!slices.hasUpperBound() || partition.isEmpty()) return false; return partition.metadata().comparator.compare(slices.get(slices.size() - 1).end(), partition.lastRow().clustering()) <= 0; }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of cells that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of non-expiring live cells is greater than the number of cells asked (we then // know we have enough live cells). // - The number of cells cached is less than requested, in which case we know we won't have enough. if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) return true; if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of row that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of rows with at least one non-expiring cell is greater than what we ask, // in which case we know we have enough live. // - The number of rows is less than requested, in which case we know we won't have enough. if (cached.rowsWithNonExpiringCells() >= rowLimit) return true; if (cached.rowCount() < rowLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }
public boolean isFullyCoveredBy(CachedPartition partition) { if (partition.isEmpty()) return false; // 'partition' contains all columns, so it covers our filter if our last clusterings // is smaller than the last in the cache return clusterings.comparator().compare(clusterings.last(), partition.lastRow().clustering()) <= 0; }
public void serialize(CachedPartition partition, DataOutputPlus out) throws IOException
{
    // On-disk/wire layout: the five cached statistics, then the table metadata, then the rows.
    int version = MessagingService.current_version;

    assert partition instanceof CachedBTreePartition;
    CachedBTreePartition btreePartition = (CachedBTreePartition) partition;

    out.writeInt(btreePartition.createdAtInSec);
    out.writeInt(btreePartition.cachedLiveRows);
    out.writeInt(btreePartition.rowsWithNonExpiringCells);
    out.writeInt(btreePartition.nonTombstoneCellCount);
    out.writeInt(btreePartition.nonExpiringLiveCells);

    CFMetaData.serializer.serialize(btreePartition.metadata(), out, version);
    try (UnfilteredRowIterator rows = btreePartition.unfilteredIterator())
    {
        UnfilteredRowIteratorSerializer.serializer.serialize(rows, null, out, version, btreePartition.rowCount());
    }
}
public boolean isFilterFullyCoveredBy(ClusteringIndexFilter filter, DataLimits limits, CachedPartition cached, int nowInSec)
{
    // We can use the cached value only if we know that no data it doesn't contain could be covered
    // by the query filter, that is if:
    //   1) either the whole partition is cached
    //   2) or we can ensure that any data the filter selects is in the cached partition
    //
    // We can guarantee that a partition is fully cached if the number of rows it contains is less than
    // what we're caching. When doing that, we should be careful about expiring cells: we could count
    // something expired that wasn't when the partition was cached, or we could decide that the whole
    // partition is cached when it's not. This is why we use CachedPartition#cachedLiveRows.
    if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
        return true;

    // If the whole partition isn't cached, then we must guarantee that the filter cannot select data that
    // is not in the cache. We can guarantee that if either the filter is a "head filter" and the cached
    // partition has more live rows than queried (where live rows refers to the rows that are live now),
    // or if we can prove that everything the filter selects is in the cached partition based on its content.
    return (filter.isHeadFilter() && limits.hasEnoughLiveData(cached, nowInSec, filter.selectsAllPartition(), metadata.enforceStrictLiveness())) || filter.isFullyCoveredBy(cached);
}
public boolean isFullyCoveredBy(CachedPartition partition) { // Partition is guaranteed to cover the whole filter if it includes the filter start and finish bounds. // (note that since partition is the head of a partition, to have no lower bound is ok) if (!slices.hasUpperBound() || partition.isEmpty()) return false; return partition.metadata().comparator.compare(slices.get(slices.size() - 1).end(), partition.lastRow().clustering()) <= 0; }
public boolean hasEnoughLiveData(CachedPartition cached, int nowInSec, boolean countPartitionsWithOnlyStaticData, boolean enforceStrictLiveness) { // We want the number of cells that are currently live. Getting that precise number forces // us to iterate the cached partition in general, but we can avoid that if: // - The number of non-expiring live cells is greater than the number of cells asked (we then // know we have enough live cells). // - The number of cells cached is less than requested, in which case we know we won't have enough. if (cached.nonExpiringLiveCells() >= cellPerPartitionLimit) return true; if (cached.nonTombstoneCellCount() < cellPerPartitionLimit) return false; // Otherwise, we need to re-count DataLimits.Counter counter = newCounter(nowInSec, false, countPartitionsWithOnlyStaticData, enforceStrictLiveness); try (UnfilteredRowIterator cacheIter = cached.unfilteredIterator(ColumnFilter.selection(cached.columns()), Slices.ALL, false); UnfilteredRowIterator iter = counter.applyTo(cacheIter)) { // Consume the iterator until we've counted enough while (iter.hasNext()) iter.next(); return counter.isDone(); } }