@Override
public ClusteringIndexFilter clusteringIndexFilter(DecoratedKey key)
{
    // The partition paging stopped on resumes from the last returned clustering;
    // every other partition uses the unmodified filter.
    if (key.equals(startKey()))
        return clusteringIndexFilter.forPaging(comparator, lastReturned, inclusive);

    return clusteringIndexFilter;
}
@Override protected UnfilteredRowIterator initializeIterator() { sstable.incrementReadCount(); @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator UnfilteredRowIterator iter = sstable.iterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed(), isForThrift); return iter; }
// Returns whether the query described by 'filter' and 'limits' can be answered entirely
// from the cached partition 'cached', i.e. whether the cache cannot be missing any data
// the query covers.
public boolean isFilterFullyCoveredBy(ClusteringIndexFilter filter, DataLimits limits, CachedPartition cached, int nowInSec)
{
    // We can use the cached value only if we know that no data it doesn't contain could be
    // covered by the query filter, that is if:
    //   1) either the whole partition is cached, or
    //   2) we can ensure that any data the filter selects is in the cached partition.
    //
    // We can guarantee that a partition is fully cached if the number of rows it contains is
    // less than what we're caching. When doing that, we should be careful about expiring
    // cells: a row that has expired since the partition was cached must still be counted (it
    // was live when cached), or we could decide that the whole partition is cached when it's
    // not. This is why we use CachedPartition#cachedLiveRows.
    if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
        return true;

    // If the whole partition isn't cached, then we must guarantee that the filter cannot
    // select data that is not in the cache. We can guarantee that if either the filter is a
    // "head filter" and the cached partition has more live rows than queried (where live rows
    // refers to the rows that are live now), or if we can prove that everything the filter
    // selects is in the cached partition based on its content.
    return (filter.isHeadFilter() && limits.hasEnoughLiveData(cached, nowInSec, filter.selectsAllPartition(), metadata.enforceStrictLiveness()))
        || filter.isFullyCoveredBy(cached);
}
@Override
public boolean isReverseOrder()
{
    // Ordering is entirely determined by the underlying clustering index filter.
    boolean reversed = filter.isReversed();
    return reversed;
}
// NOTE(review): this span looks like two fragments spliced together — the statements after
// the first 'return' are unreachable, and the braceless 'if' immediately followed by a local
// variable declaration would not compile; left byte-identical pending confirmation.

// Serve the read straight from the row cache and record a zero-sstable read.
UnfilteredRowIterator unfilteredRowIterator = clusteringIndexFilter().getUnfilteredRowIterator(columnFilter(), cachedPartition);
cfs.metric.updateSSTableIterated(0);
return unfilteredRowIterator;

// Presumably: when caching full partitions (or for a head filter) serve what was cached, then
// top up from 'iter' with whatever the cache does not index — TODO confirm intended control flow.
if (cacheFullPartitions || clusteringIndexFilter().isHeadFilter())
UnfilteredRowIterator cacheIterator = clusteringIndexFilter().getUnfilteredRowIterator(columnFilter(), toCache);
if (cacheFullPartitions)
return cacheIterator;
return UnfilteredRowIterators.concat(cacheIterator, clusteringIndexFilter().filterNotIndexed(columnFilter(), iter));
// Human-readable rendering of this command's key range and clustering filter.
public String toString(CFMetaData metadata)
{
    StringBuilder sb = new StringBuilder();
    sb.append("range=").append(keyRange.getString(metadata.getKeyValidator()));
    sb.append(" pfilter=").append(clusteringIndexFilter.toString(metadata));
    return sb.toString();
}
// Whether the clustering index filter selects the whole partition.
public boolean selectsAllPartition()
{
    boolean selectsAll = clusteringIndexFilter.selectsAllPartition();
    return selectsAll;
}
// NOTE(review): fragment — 'iter' is registered into 'iterators' and then an empty iterator
// is returned unconditionally, which looks like statements spliced from two different code
// paths; left byte-identical pending confirmation.

UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition);
// Track the oldest unrepaired tombstone seen across the partitions touched by this read.
oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, partition.stats().minLocalDeletionTime);
// For Thrift reads the iterator is wrapped so results merge the way Thrift expects.
iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter);
return EmptyIterators.unfilteredRow(cfs.metadata, partitionKey(), filter.isReversed());
protected void appendCQLWhereClause(StringBuilder sb) { sb.append(" WHERE "); sb.append(ColumnDefinition.toCQLString(metadata().partitionKeyColumns())).append(" = "); DataRange.appendKeyString(sb, metadata().getKeyValidator(), partitionKey().getKey()); // We put the row filter first because the clustering index filter can end by "ORDER BY" if (!rowFilter().isEmpty()) sb.append(" AND ").append(rowFilter()); String filterString = clusteringIndexFilter().toCQLString(metadata()); if (!filterString.isEmpty()) sb.append(" AND ").append(filterString); }
public UnfilteredRowIterator next() { Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iter.next(); // Actual stored key should be true DecoratedKey assert entry.getKey() instanceof DecoratedKey; DecoratedKey key = (DecoratedKey)entry.getKey(); ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key); return filter.getUnfilteredRowIterator(columnFilter, entry.getValue()); } }
@Override
public boolean isReverseOrder()
{
    // Ordering is entirely determined by the underlying clustering index filter.
    boolean reversed = filter.isReversed();
    return reversed;
}
// NOTE(review): this span looks like two fragments spliced together — the statements after
// the first 'return' are unreachable, and the braceless 'if' immediately followed by a local
// variable declaration would not compile; left byte-identical pending confirmation.

// Serve the read straight from the row cache and record a zero-sstable read.
UnfilteredRowIterator unfilteredRowIterator = clusteringIndexFilter().getUnfilteredRowIterator(columnFilter(), cachedPartition);
cfs.metric.updateSSTableIterated(0);
return unfilteredRowIterator;

// Presumably: when caching full partitions (or for a head filter) serve what was cached, then
// top up from 'iter' with whatever the cache does not index — TODO confirm intended control flow.
if (cacheFullPartitions || clusteringIndexFilter().isHeadFilter())
UnfilteredRowIterator cacheIterator = clusteringIndexFilter().getUnfilteredRowIterator(columnFilter(), toCache);
if (cacheFullPartitions)
return cacheIterator;
return UnfilteredRowIterators.concat(cacheIterator, clusteringIndexFilter().filterNotIndexed(columnFilter(), iter));
// Human-readable rendering of this command's key range and clustering filter.
public String toString(CFMetaData metadata)
{
    StringBuilder sb = new StringBuilder();
    sb.append("range=").append(keyRange.getString(metadata.getKeyValidator()));
    sb.append(" pfilter=").append(clusteringIndexFilter.toString(metadata));
    return sb.toString();
}
// Whether the clustering index filter selects the whole partition.
public boolean selectsAllPartition()
{
    boolean selectsAll = clusteringIndexFilter.selectsAllPartition();
    return selectsAll;
}
// NOTE(review): fragment — 'iter' is registered into 'iterators' and then an empty iterator
// is returned unconditionally, which looks like statements spliced from two different code
// paths; left byte-identical pending confirmation.

UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition);
// Track the oldest unrepaired tombstone seen across the partitions touched by this read.
oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, partition.stats().minLocalDeletionTime);
// For Thrift reads the iterator is wrapped so results merge the way Thrift expects.
iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter);
return EmptyIterators.unfilteredRow(cfs.metadata, partitionKey(), filter.isReversed());
protected void appendCQLWhereClause(StringBuilder sb) { sb.append(" WHERE "); sb.append(ColumnDefinition.toCQLString(metadata().partitionKeyColumns())).append(" = "); DataRange.appendKeyString(sb, metadata().getKeyValidator(), partitionKey().getKey()); // We put the row filter first because the clustering index filter can end by "ORDER BY" if (!rowFilter().isEmpty()) sb.append(" AND ").append(rowFilter()); String filterString = clusteringIndexFilter().toCQLString(metadata()); if (!filterString.isEmpty()) sb.append(" AND ").append(filterString); }
public UnfilteredRowIterator next() { Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iter.next(); // Actual stored key should be true DecoratedKey assert entry.getKey() instanceof DecoratedKey; DecoratedKey key = (DecoratedKey)entry.getKey(); ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key); return filter.getUnfilteredRowIterator(columnFilter, entry.getValue()); } }
@Override protected UnfilteredRowIterator initializeIterator() { @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator UnfilteredRowIterator iter = sstable.iterator(partitionKey(), filter.getSlices(metadata()), selectedColumns, filter.isReversed(), isForThrift, listener); return isForThrift && applyThriftTransformation ? ThriftResultsMerger.maybeWrap(iter, nowInSec) : iter; }
@Override
public boolean isReverseOrder()
{
    // Ordering is entirely determined by the underlying clustering index filter.
    boolean reversed = filter.isReversed();
    return reversed;
}
// Returns whether the query described by 'filter' and 'limits' can be answered entirely
// from the cached partition 'cached', i.e. whether the cache cannot be missing any data
// the query covers.
public boolean isFilterFullyCoveredBy(ClusteringIndexFilter filter, DataLimits limits, CachedPartition cached, int nowInSec)
{
    // We can use the cached value only if we know that no data it doesn't contain could be
    // covered by the query filter, that is if:
    //   1) either the whole partition is cached, or
    //   2) we can ensure that any data the filter selects is in the cached partition.
    //
    // We can guarantee that a partition is fully cached if the number of rows it contains is
    // less than what we're caching. When doing that, we should be careful about expiring
    // cells: a row that has expired since the partition was cached must still be counted (it
    // was live when cached), or we could decide that the whole partition is cached when it's
    // not. This is why we use CachedPartition#cachedLiveRows.
    if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
        return true;

    // If the whole partition isn't cached, then we must guarantee that the filter cannot
    // select data that is not in the cache. We can guarantee that if either the filter is a
    // "head filter" and the cached partition has more live rows than queried (where live rows
    // refers to the rows that are live now), or if we can prove that everything the filter
    // selects is in the cached partition based on its content.
    return (filter.isHeadFilter() && limits.hasEnoughLiveData(cached, nowInSec, filter.selectsAllPartition(), metadata.enforceStrictLiveness()))
        || filter.isFullyCoveredBy(cached);
}