/**
 * Builds a QueryFilter that selects every column of the given row.
 * <p>
 * This is dangerous on large rows; avoid it except in test code.
 *
 * @param key       the row to read
 * @param cfName    the column family to query
 * @param timestamp time used for determining expiring columns' state
 * @return a filter covering the entire row
 */
public static QueryFilter getIdentityFilter(DecoratedKey key, String cfName, long timestamp)
{
    IdentityQueryFilter identity = new IdentityQueryFilter();
    return new QueryFilter(key, cfName, identity, timestamp);
}
/**
 * Builds a QueryFilter restricted to an explicit set of column names.
 *
 * @param key       the row to slice
 * @param cfName    the column family to query
 * @param columns   the column names to restrict the results to, sorted in comparator order
 * @param timestamp time used for determining expiring columns' state
 * @return a filter returning only the named columns
 */
public static QueryFilter getNamesFilter(DecoratedKey key, String cfName, SortedSet<CellName> columns, long timestamp)
{
    NamesQueryFilter names = new NamesQueryFilter(columns);
    return new QueryFilter(key, cfName, names, timestamp);
}
/**
 * Builds a QueryFilter satisfying the given slice criteria.
 *
 * @param key       the row to slice
 * @param cfName    the column family to query
 * @param start     column to start the slice at, inclusive; empty means "the first column"
 * @param finish    column to stop the slice at, inclusive; empty means "the last column"
 * @param reversed  true to start with the largest column (per the configured sort order) instead of the smallest
 * @param limit     maximum number of non-deleted columns to return
 * @param timestamp time used for determining expiring columns' state
 * @return a filter returning the requested slice of the row
 */
public static QueryFilter getSliceFilter(DecoratedKey key, String cfName, Composite start, Composite finish, boolean reversed, int limit, long timestamp)
{
    SliceQueryFilter slice = new SliceQueryFilter(start, finish, reversed, limit);
    return new QueryFilter(key, cfName, slice, timestamp);
}
/**
 * Reads the row selected by this command's key and filter from the given keyspace.
 *
 * @param keyspace the keyspace to read from
 * @return the resulting row
 */
public Row getRow(Keyspace keyspace)
{
    DecoratedKey decoratedKey = StorageService.getPartitioner().decorateKey(key);
    QueryFilter rowFilter = new QueryFilter(decoratedKey, cfName, filter, timestamp);
    return keyspace.getRow(rowFilter);
}
/**
 * Reads the given column slices of a partition and groups the resulting cells
 * into per-clustering-key column families.
 *
 * @param dk           the partition to read
 * @param columnSlices the slices of the partition to fetch
 * @return cells of the partition keyed by clustering name, as produced by the table mapper
 */
private Map<CellName, ColumnFamily> getCellNameColumnFamilyMap(DecoratedKey dk, ColumnSlice[] columnSlices)
{
    // Unbounded, forward-order slice over the requested ranges.
    SliceQueryFilter slices = new SliceQueryFilter(columnSlices, false, Integer.MAX_VALUE);
    QueryFilter partitionFilter = new QueryFilter(dk, tableMapper.table.name, slices, filter.timestamp);
    ColumnFamily cells = tableMapper.table.getColumnFamily(partitionFilter);
    return tableMapper.getRows(cells);
}
/**
 * Reads the row selected by this command's key and filter from the given keyspace.
 * <p>
 * Reversed queries that also fetch static columns are split into two reads
 * (one for the static slice, one for the rest) and merged, because a single
 * reversed read cannot guarantee the static columns are fetched (CASSANDRA-8502).
 *
 * @param keyspace the keyspace to read from
 * @return the resulting row; for the split case, static cells are prepended to the normal results
 */
public Row getRow(Keyspace keyspace)
{
    CFMetaData cfm = Schema.instance.getCFMetaData(ksName, cfName);
    DecoratedKey dk = StorageService.getPartitioner().decorateKey(key);

    // If we're doing a reversed query and the filter includes static columns, we need to issue two separate
    // reads in order to guarantee that the static columns are fetched.  See CASSANDRA-8502 for more details.
    if (filter.reversed && filter.hasStaticSlice(cfm))
    {
        logger.debug("Splitting reversed slice with static columns into two reads");
        // left = the static-columns slice, right = the remainder of the original slice.
        Pair<SliceQueryFilter, SliceQueryFilter> newFilters = filter.splitOutStaticSlice(cfm);

        Row normalResults = keyspace.getRow(new QueryFilter(dk, cfName, newFilters.right, timestamp));
        Row staticResults = keyspace.getRow(new QueryFilter(dk, cfName, newFilters.left, timestamp));

        // add the static results to the start of the normal results
        if (normalResults.cf == null)
            return staticResults;

        if (staticResults.cf != null)
            // Reverse-sorted iteration so the static cells land in the correct order
            // at the head of the (reversed) normal results.
            for (Cell cell : staticResults.cf.getReverseSortedColumns())
                normalResults.cf.addColumn(cell);

        return normalResults;
    }

    return keyspace.getRow(new QueryFilter(dk, cfName, filter, timestamp));
}
/**
 * Rebuilds a row-cache entry for the partition identified by {@code buffer}.
 * <p>
 * NOTE(review): the trailing "} });" closes an enclosing anonymous class /
 * method call that is outside this view.
 *
 * @return the cache key paired with the (possibly null) top-level column data
 * @throws Exception if the read fails
 */
public Pair<RowCacheKey, IRowCacheEntry> call() throws Exception
{
    DecoratedKey key = cfs.partitioner.decorateKey(buffer);
    // Integer.MIN_VALUE as the timestamp/gc-before keeps expiring and deleted
    // cells visible so the cached entry is a faithful snapshot of the row.
    QueryFilter cacheFilter = new QueryFilter(key, cfs.getColumnFamilyName(), cfs.readFilterForCache(), Integer.MIN_VALUE);
    ColumnFamily data = cfs.getTopLevelColumns(cacheFilter, Integer.MIN_VALUE);
    return Pair.create(new RowCacheKey(cfs.metadata.ksAndCFName, key), (IRowCacheEntry) data);
}
});
/**
 * Re-reads the existing (old) version of a CQL row and converts its indexable
 * cells into Lucene fields.
 *
 * @param dk     the partition the row lives in
 * @param pkBuf  serialized primary-key component used to rebuild the clustering key
 * @param fields output list that indexable fields are appended to
 */
private void loadOldRow(DecoratedKey dk, ByteBuffer pkBuf, List<Field> fields)
{
    CellName clusteringKey = tableMapper.makeClusteringKey(pkBuf);
    // Slice covering exactly the cells of this one CQL row.
    Composite start = tableMapper.start(clusteringKey);
    Composite end = tableMapper.end(start);
    ColumnSlice columnSlice = new ColumnSlice(start, end);
    SliceQueryFilter sliceQueryFilter = new SliceQueryFilter(columnSlice, false, Integer.MAX_VALUE);
    // System.currentTimeMillis() replaces the legacy new Date().getTime() — same value, no throwaway object.
    QueryFilter queryFilter = new QueryFilter(dk, tableMapper.table.name, sliceQueryFilter, System.currentTimeMillis());
    ColumnFamily columnFamily = tableMapper.table.getColumnFamily(queryFilter);
    Map<CellName, ColumnFamily> fullSlice = tableMapper.getRows(columnFamily);
    ColumnFamily oldDocument = fullSlice.get(clusteringKey);
    // The old row may have been deleted or expired between reads; nothing to index then.
    if (oldDocument == null)
        return;
    for (Cell cell : oldDocument)
    {
        CellName cellName = cell.name();
        ColumnIdentifier cql3ColName = cellName.cql3ColumnName(tableMapper.cfMetaData);
        String actualColName = cql3ColName.toString();
        ColumnDefinition columnDefinition = tableMapper.cfMetaData.getColumnDefinition(cql3ColName);
        if (options.shouldIndex(actualColName))
        {
            addFields(cell, actualColName, columnDefinition, fields);
        }
    }
}
/**
 * Merges several versions of the same partition into a single superset
 * ColumnFamily, resolving per-cell conflicts and applying tombstones.
 *
 * @param versions the candidate versions; entries may be null, at least one element required
 * @param now      time used when collating/removing deleted cells
 * @return the merged ColumnFamily, or null if every version was null
 */
static ColumnFamily resolveSuperset(Iterable<ColumnFamily> versions, long now)
{
    assert Iterables.size(versions) > 0;

    ColumnFamily resolved = null;
    for (ColumnFamily cf : versions)
    {
        if (cf == null)
            continue;

        if (resolved == null)
            // First non-null version seeds the result (metadata + deletion info, no cells).
            resolved = cf.cloneMeShallow();
        else
            // Merge this version's deletion info (row/range tombstones) into the result.
            resolved.delete(cf);
    }
    if (resolved == null)
        return null;

    // mimic the collectCollatedColumn + removeDeleted path that getColumnFamily takes.
    // this will handle removing columns and subcolumns that are suppressed by a row or
    // supercolumn tombstone.
    QueryFilter filter = new QueryFilter(null, resolved.metadata().cfName, new IdentityQueryFilter(), now);
    List<CloseableIterator<Cell>> iters = new ArrayList<>(Iterables.size(versions));
    for (ColumnFamily version : versions)
        if (version != null)
            iters.add(FBUtilities.closeableIterator(version.iterator()));
    // Integer.MIN_VALUE gcBefore keeps tombstones in place while collating.
    filter.collateColumns(resolved, iters, Integer.MIN_VALUE);
    return ColumnFamilyStore.removeDeleted(resolved, Integer.MIN_VALUE);
}
// Read the base row for the current index position.
ColumnFamily data = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, filter.columnFilter(lastSeenKey.toByteBuffer()), filter.timestamp));
// Without these braces the local declaration of 'cf' is the lone statement of the
// 'if' — invalid Java — and 'cf' would be out of scope in the null check below.
if (extraFilter != null)
{
    // Fetch the extra columns requested beyond the primary filter and merge them in.
    ColumnFamily cf = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, extraFilter, filter.timestamp));
    if (cf != null)
        data.addAll(cf);
}
// NOTE(review): this span begins mid-expression — the ':' below is the else branch
// of a ternary building the slice array; its condition and then-branch are outside
// this view, as is the body of the trailing 'if'.
: new ColumnSlice[]{ dataSlice };
// Unbounded forward slice; countCQL3Rows groups cells per CQL row using the
// table's clustering-column count.
SliceQueryFilter dataFilter = new SliceQueryFilter(slices, false, Integer.MAX_VALUE, baseCfs.metadata.clusteringColumns().size());
ColumnFamily newData = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, dataFilter, filter.timestamp));
// A missing row or a stale index entry is handled by the (unseen) if-body.
if (newData == null || index.isStale(entry, newData, filter.timestamp))
// Without these braces the local declaration of 'cf' is the lone statement of the
// 'if' — invalid Java — and 'cf' would be out of scope in the null check below.
if (extraFilter != null)
{
    // Fetch the extra columns requested beyond the primary filter and merge them in.
    ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, name, extraFilter, filter.timestamp));
    if (cf != null)
        data.addAll(cf);
}
// Rebuild the filter with the narrowed column set; key, cf name and timestamp
// are carried over unchanged from the original filter.
QueryFilter reducedFilter = new QueryFilter(filter.key, filter.cfName, namesFilter.withUpdatedColumns(filterColumns), filter.timestamp);
// Filter used to read the cacheable slice of the row; reuses the original
// filter's key and timestamp with the cache-specific slice.
QueryFilter cacheFilter = new QueryFilter(filter.key, name, cacheSlice, filter.timestamp);