@Override
public Row applyToRow(Row row)
{
    // Crossing into a new partition: reset the per-partition budget
    // before this row is counted against it.
    if (!currentKey.equals(lastKey))
    {
        remainingInPartition = limits.perPartitionCount();
        lastKey = currentKey;
    }
    lastRow = row;
    return row;
}
@Override
public Row applyToStatic(Row row)
{
    // A non-empty static row means we have produced data for this partition:
    // reset the per-partition budget and remember the key and row.
    if (!row.isEmpty())
    {
        remainingInPartition = limits.perPartitionCount();
        lastKey = currentKey;
        lastRow = row;
    }
    return row;
}
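// For illustration only: a self-contained sketch of the same reset-on-new-partition
// pattern, outside Cassandra. The PartitionCounter class and all of its names are
// hypothetical; only the counter-reset idea comes from the two methods above.
import java.util.Objects;

public class PartitionCounter
{
    private final int perPartitionLimit;
    private int remainingInPartition;
    private Object lastKey;

    public PartitionCounter(int perPartitionLimit)
    {
        this.perPartitionLimit = perPartitionLimit;
        this.remainingInPartition = perPartitionLimit;
    }

    // Consume one row from the current partition's budget, resetting the
    // budget whenever the partition key changes; returns false once spent.
    public boolean tryConsume(Object currentKey)
    {
        if (!Objects.equals(currentKey, lastKey))
        {
            remainingInPartition = perPartitionLimit;
            lastKey = currentKey;
        }
        if (remainingInPartition == 0)
            return false;
        remainingInPartition--;
        return true;
    }

    public static void main(String[] args)
    {
        PartitionCounter counter = new PartitionCounter(2);
        System.out.println(counter.tryConsume("p1")); // true
        System.out.println(counter.tryConsume("p1")); // true
        System.out.println(counter.tryConsume("p1")); // false: budget spent
        System.out.println(counter.tryConsume("p2")); // true: reset on new key
    }
}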
protected AbstractQueryPager(ReadCommand command, ProtocolVersion protocolVersion)
{
    this.command = command;
    this.protocolVersion = protocolVersion;
    this.limits = command.limits();
    this.enforceStrictLiveness = command.metadata().enforceStrictLiveness();

    // Seed both counters from the command's limits: rows remaining across
    // the whole query, and rows remaining in the current partition.
    this.remaining = limits.count();
    this.remainingInPartition = limits.perPartitionCount();
}
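// The two counters above come straight from DataLimits. A minimal sketch of that
// relationship, assuming the DataLimits.cqlLimits(rowLimit, perPartitionLimit)
// factory (roughly what a "LIMIT 100 PER PARTITION LIMIT 10" query would build);
// treat the factory call and the demo class as assumptions, not source code.
import org.apache.cassandra.db.filter.DataLimits;

public class LimitsDemo
{
    public static void main(String[] args)
    {
        DataLimits limits = DataLimits.cqlLimits(100, 10);

        int remaining = limits.count();                         // overall row limit: 100
        int remainingInPartition = limits.perPartitionCount();  // per-partition limit: 10

        System.out.println(remaining + " rows total, " + remainingInPartition + " per partition");
    }
}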
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace,
                                                                         List<ByteBuffer> keys,
                                                                         ColumnParent column_parent,
                                                                         int nowInSec,
                                                                         SlicePredicate predicate,
                                                                         ConsistencyLevel consistency_level,
                                                                         ClientState cState,
                                                                         long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);

    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForRead(keyspace);

    List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size());
    ColumnFilter columnFilter = makeColumnFilter(metadata, column_parent, predicate);
    ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, predicate);
    DataLimits limits = getLimits(1, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate);

    // One single-partition read per requested key, all sharing the same
    // filters and limits.
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(metadata, key);
        DecoratedKey dk = metadata.decorateKey(key);
        commands.add(SinglePartitionReadCommand.create(true, metadata, nowInSec, columnFilter, RowFilter.NONE, limits, dk, filter));
    }

    return getSlice(commands, column_parent.isSetSuper_column(), limits.perPartitionCount(), consistencyLevel, cState, queryStartNanoTime);
}
            return limits.perPartitionCount();
        default:
            return Integer.MAX_VALUE;
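// The two return statements above are the tail of a switch over the limits kind.
// A hypothetical reconstruction for readability; the method name and the case
// labels are assumptions, not taken from the source:
private static int perPartitionBound(DataLimits limits)
{
    switch (limits.kind())
    {
        case CQL_LIMIT:
        case CQL_PAGING_LIMIT:
            return limits.perPartitionCount();
        default:
            return Integer.MAX_VALUE; // no per-partition bound for other kinds
    }
}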
private static void serializeNamesFilter(ReadCommand command, ClusteringIndexNamesFilter filter, DataOutputPlus out) throws IOException
{
    PartitionColumns columns = command.columnFilter().fetchedColumns();
    CFMetaData metadata = command.metadata();
    SortedSet<Clustering> requestedRows = filter.requestedRows();

    if (requestedRows.isEmpty())
    {
        // only static columns are requested
        out.writeInt(columns.size());
        for (ColumnDefinition column : columns)
            ByteBufferUtil.writeWithShortLength(column.name.bytes, out);
    }
    else
    {
        out.writeInt(requestedRows.size() * columns.size());
        for (Clustering clustering : requestedRows)
        {
            for (ColumnDefinition column : columns)
                ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null), out);
        }
    }

    // countCql3Rows should be true if it's not for Thrift or a DISTINCT query
    if (command.isForThrift() || (command.limits().kind() == DataLimits.Kind.CQL_LIMIT && command.limits().perPartitionCount() == 1))
        out.writeBoolean(false); // it's compact and not a DISTINCT query
    else
        out.writeBoolean(true);
}
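// For reference, writeWithShortLength frames a buffer as a two-byte unsigned
// big-endian length followed by the raw bytes. A self-contained sketch of that
// framing; the class and method here are mine, not Cassandra's:
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ShortLengthFraming
{
    // Two-byte unsigned length, then the payload bytes.
    static void writeWithShortLength(byte[] bytes, DataOutputStream out) throws IOException
    {
        assert bytes.length <= 0xFFFF : "short-length framing caps payloads at 65535 bytes";
        out.writeShort(bytes.length);
        out.write(bytes);
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        writeWithShortLength("col1".getBytes(StandardCharsets.UTF_8), new DataOutputStream(buffer));
        System.out.println(buffer.size()); // 6: two length bytes + four payload bytes
    }
}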
size += LegacyReadCommandSerializer.serializedSlicesSize(filter.requestedSlices(), makeStaticSlice, metadata);
size += TypeSizes.sizeof(filter.isReversed());
size += TypeSizes.sizeof(rangeCommand.limits().perPartitionCount());
size += TypeSizes.sizeof(0); // compositesToGroup