private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace,
                                                                         List<ByteBuffer> keys,
                                                                         ColumnParent column_parent,
                                                                         long timestamp,
                                                                         SlicePredicate predicate,
                                                                         ConsistencyLevel consistency_level,
                                                                         ClientState cState)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);

    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForRead(keyspace);

    List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
    IDiskAtomFilter filter = toInternalFilter(metadata, column_parent, predicate);

    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(metadata, key);
        // Note that we should not share a slice filter amongst the commands, due to SliceQueryFilter
        // not being immutable because of the columnCounter used by its lastCounted() method
        // (also see SelectStatement.getSliceCommands)
        commands.add(ReadCommand.create(keyspace, key, column_parent.getColumn_family(), timestamp, filter.cloneShallow()));
    }

    return getSlice(commands, column_parent.isSetSuper_column(), consistencyLevel, cState);
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace,
                                                                         List<ByteBuffer> keys,
                                                                         ColumnParent column_parent,
                                                                         int nowInSec,
                                                                         SlicePredicate predicate,
                                                                         ConsistencyLevel consistency_level,
                                                                         ClientState cState,
                                                                         long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);

    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForRead(keyspace);

    List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size());
    ColumnFilter columnFilter = makeColumnFilter(metadata, column_parent, predicate);
    ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, predicate);
    DataLimits limits = getLimits(1, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate);

    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(metadata, key);
        DecoratedKey dk = metadata.decorateKey(key);
        // Unlike the pre-3.0 version above, the same filter instance is reused for every key;
        // no per-command clone is needed. The leading 'true' marks this as a Thrift-originated read.
        commands.add(SinglePartitionReadCommand.create(true, metadata, nowInSec, columnFilter, RowFilter.NONE, limits, dk, filter));
    }

    return getSlice(commands, column_parent.isSetSuper_column(), limits.perPartitionCount(), consistencyLevel, cState, queryStartNanoTime);
}
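/*
 * For context, a minimal caller sketch (an assumption, not the verbatim server code):
 * a Thrift multiget_slice entry point would resolve the keyspace from the connection's
 * ClientState, check permissions, and delegate to multigetSliceInternal above,
 * translating internal exceptions back into Thrift ones. Tracing and request
 * scheduling are omitted.
 */
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys,
                                                                 ColumnParent column_parent,
                                                                 SlicePredicate predicate,
                                                                 ConsistencyLevel consistency_level)
throws InvalidRequestException, UnavailableException, TimedOutException
{
    try
    {
        ClientState cState = state();          // per-connection client state
        String keyspace = cState.getKeyspace();
        cState.hasColumnFamilyAccess(keyspace, column_parent.column_family, Permission.SELECT);
        return multigetSliceInternal(keyspace, keys, column_parent, FBUtilities.nowInSeconds(),
                                     predicate, consistency_level, cState, System.nanoTime());
    }
    catch (RequestValidationException e)
    {
        // map internal validation exceptions to the Thrift-level InvalidRequestException
        throw ThriftConversion.toThrift(e);
    }
}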
CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family, false);
ThriftValidation.validateColumnParent(metadata, column_parent);
ThriftValidation.validatePredicate(metadata, column_parent, column_predicate);
ThriftValidation.validateIndexClauses(metadata, index_clause);
org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
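/*
 * Illustrative only: Thrift arguments of the shape the validation above accepts.
 * The indexed column "birth_year" and its value are invented for the example;
 * validateIndexClauses requires at least one EQ expression on an indexed column.
 */
List<IndexExpression> expressions = Collections.singletonList(
    new IndexExpression(ByteBufferUtil.bytes("birth_year"),  // indexed column (hypothetical)
                        IndexOperator.EQ,                     // at least one EQ clause is required
                        ByteBufferUtil.bytes(1985L)));        // value to match
IndexClause index_clause = new IndexClause(expressions,
                                           ByteBufferUtil.EMPTY_BYTE_BUFFER, // start from the first key
                                           100);                             // page size

SlicePredicate column_predicate = new SlicePredicate();
column_predicate.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, // full row slice
                                               ByteBufferUtil.EMPTY_BYTE_BUFFER,
                                               false,   // not reversed
                                               1000));  // column count cap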
ThriftValidation.validatePredicate(metadata, column_parent, predicate);
ThriftValidation.validateKeyRange(metadata, column_parent.super_column, range);
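/*
 * Illustrative only (invented example values): a KeyRange spanning the whole ring,
 * of the kind the validateKeyRange call above accepts. Empty start and end keys
 * mean "from the beginning of the ring, wrapping around"; count caps the page size.
 */
KeyRange range = new KeyRange(100);                     // count: max rows per page
range.setStart_key(ByteBufferUtil.EMPTY_BYTE_BUFFER);   // empty start key: beginning of ring
range.setEnd_key(ByteBufferUtil.EMPTY_BYTE_BUFFER);     // empty end key: wrap to start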