public long serializedSize(DataLimits limits, int version, ClusteringComparator comparator)
    long size = TypeSizes.sizeof((byte) limits.kind().ordinal());
    switch (limits.kind())
            size += TypeSizes.sizeofUnsignedVInt(cqlLimits.perPartitionLimit);
            size += TypeSizes.sizeof(cqlLimits.isDistinct);
            if (limits.kind() == Kind.CQL_PAGING_LIMIT)
            if (limits.kind() == Kind.CQL_GROUP_BY_PAGING_LIMIT)

public void serialize(DataLimits limits, DataOutputPlus out, int version, ClusteringComparator comparator) throws IOException
    out.writeByte(limits.kind().ordinal());
    switch (limits.kind())
            out.writeUnsignedVInt(cqlLimits.perPartitionLimit);
            out.writeBoolean(cqlLimits.isDistinct);
            if (limits.kind() == Kind.CQL_PAGING_LIMIT)
            if (limits.kind() == Kind.CQL_GROUP_BY_PAGING_LIMIT)

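Not part of the original excerpt, but the invariant these two methods must keep is worth stating: serializedSize() has to report exactly the number of bytes serialize() writes for the same limits, version, and comparator. A minimal round-trip sketch, assuming the usual Cassandra utilities (DataOutputBuffer, MessagingService.current_version) and a comparator taken from the table metadata:

// Hedged sketch, not in the original source: check that serializedSize() matches the bytes
// serialize() actually writes. DataOutputBuffer and MessagingService.current_version are the
// standard Cassandra utilities; `comparator` would typically be metadata.comparator.
static void assertSizeMatchesSerialization(DataLimits limits, ClusteringComparator comparator) throws IOException
{
    int version = MessagingService.current_version;
    try (DataOutputBuffer out = new DataOutputBuffer())
    {
        DataLimits.serializer.serialize(limits, out, version, comparator);
        assert out.getLength() == DataLimits.serializer.serializedSize(limits, version, comparator)
             : "serializedSize() disagrees with the bytes serialize() wrote";
    }
}
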
return command.columnFilter().fetchedColumns().statics.isEmpty() ? 1 : Integer.MAX_VALUE;

switch (limits.kind())

private static void serializeNamesFilter(ReadCommand command, ClusteringIndexNamesFilter filter, DataOutputPlus out) throws IOException
{
    PartitionColumns columns = command.columnFilter().fetchedColumns();
    CFMetaData metadata = command.metadata();
    SortedSet<Clustering> requestedRows = filter.requestedRows();

    if (requestedRows.isEmpty())
    {
        // only static columns are requested
        out.writeInt(columns.size());
        for (ColumnDefinition column : columns)
            ByteBufferUtil.writeWithShortLength(column.name.bytes, out);
    }
    else
    {
        out.writeInt(requestedRows.size() * columns.size());
        for (Clustering clustering : requestedRows)
        {
            for (ColumnDefinition column : columns)
                ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null), out);
        }
    }

    // countCql3Rows should be true if it's not for Thrift or a DISTINCT query
    if (command.isForThrift() || (command.limits().kind() == DataLimits.Kind.CQL_LIMIT && command.limits().perPartitionCount() == 1))
        out.writeBoolean(false); // it's for Thrift or a DISTINCT query
    else
        out.writeBoolean(true);
}

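For reference, the legacy wire layout that serializeNamesFilter produces, read directly off the writes above (a reading aid, not an authoritative format description):

// Layout written by serializeNamesFilter (derived from the calls above):
//   int      n                   -- number of cell names (columns, or requestedRows * columns)
//   n times: short-length bytes  -- legacy-encoded cell name (bare static column name, or
//                                   clustering + column via LegacyLayout.encodeCellName)
//   boolean  countCql3Rows       -- false for Thrift or DISTINCT queries, true otherwise
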
private void serializeSliceCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException
{
    CFMetaData metadata = command.metadata();
    ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter) command.clusteringIndexFilter();

    Slices slices = filter.requestedSlices();
    boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING);
    serializeSlices(out, slices, filter.isReversed(), makeStaticSlice, metadata);

    out.writeBoolean(filter.isReversed());

    boolean selectsStatics = !command.columnFilter().fetchedColumns().statics.isEmpty() || slices.selects(Clustering.STATIC_CLUSTERING);
    DataLimits limits = command.limits();
    if (limits.isDistinct())
        out.writeInt(1); // the limit is always 1 for DISTINCT queries
    else
        out.writeInt(updateLimitForQuery(command.limits().count(), filter.requestedSlices()));

    int compositesToGroup;
    if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT || metadata.isDense())
        compositesToGroup = -1;
    else if (limits.isDistinct() && !selectsStatics)
        compositesToGroup = -2; // for DISTINCT queries (CASSANDRA-8490)
    else
        compositesToGroup = metadata.clusteringColumns().size();

    out.writeInt(compositesToGroup);
}

if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT)
    compositesToGroup = -1;
else if (limits.isDistinct() && !selectsStatics)

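Both slice-command excerpts above choose compositesToGroup with the same three-way sentinel. A hedged helper, not a method in the original source, that makes the encoding explicit (it follows the serializeSliceCommand variant, which also checks metadata.isDense()):

// Hedged sketch, not in the original source: the compositesToGroup sentinel used by the
// legacy (pre-3.0) slice command format, as implied by the branches above.
//   -1 : Thrift query or dense table -- cells are not grouped into CQL rows
//   -2 : DISTINCT query that selects no static columns (CASSANDRA-8490)
//    n : group cells by the first n clustering components, i.e. one CQL row per clustering
private static int legacyCompositesToGroup(CFMetaData metadata, DataLimits limits, boolean selectsStatics)
{
    if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT || metadata.isDense())
        return -1;
    if (limits.isDistinct() && !selectsStatics)
        return -2;
    return metadata.clusteringColumns().size();
}
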
DataLimits.Kind kind = rangeCommand.limits().kind();
boolean isDistinct = (kind == DataLimits.Kind.CQL_LIMIT || kind == DataLimits.Kind.CQL_PAGING_LIMIT) && rangeCommand.limits().perPartitionCount() == 1;
if (isDistinct)

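The range-command path above reconstructs DISTINCT from the limits alone. A hedged helper, not in the original source, spelling out that check:

// Hedged sketch, not in the original source: the legacy range-command path treats a CQL limit
// (paging or not) with a per-partition count of 1 as a SELECT DISTINCT, per the check above.
private static boolean isDistinctQuery(DataLimits limits)
{
    DataLimits.Kind kind = limits.kind();
    return (kind == DataLimits.Kind.CQL_LIMIT || kind == DataLimits.Kind.CQL_PAGING_LIMIT)
           && limits.perPartitionCount() == 1;
}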