private ColumnFilter makeColumnFilter(CFMetaData metadata, ColumnParent parent, SliceRange range) { if (metadata.isSuper() && parent.isSetSuper_column()) { // We want a slice of the dynamic columns ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); ColumnDefinition def = metadata.compactValueColumn(); ByteBuffer start = range.reversed ? range.finish : range.start; ByteBuffer finish = range.reversed ? range.start : range.finish; builder.slice(def, start.hasRemaining() ? CellPath.create(start) : CellPath.BOTTOM, finish.hasRemaining() ? CellPath.create(finish) : CellPath.TOP); if (metadata.isDense()) return builder.build(); // We also want to add any staticly defined column if it's within the range AbstractType<?> cmp = metadata.thriftColumnNameType(); for (ColumnDefinition column : metadata.partitionColumns()) { if (SuperColumnCompatibility.isSuperColumnMapColumn(column)) continue; ByteBuffer name = column.name.bytes; if (cmp.compare(name, start) < 0 || cmp.compare(finish, name) > 0) continue; builder.add(column); } return builder.build(); } return makeColumnFilter(metadata, makeSlices(metadata, range)); }
// NOTE(review): orphaned statement fragment — the enclosing method(s) are not visible in this
// chunk. It appears to mix statements from a thrift single-column read path: selecting either a
// static column or one cell of the dynamic super-column map, then building a by-name clustering
// filter from column_path. 'builder', 'columns', 'filter', 'staticDef', 'dynamicDef', 'cellname'
// and 'column_path' are all declared elsewhere — confirm against the full file before editing.
builder.add(staticDef); builder.select(dynamicDef, CellPath.create(column_path.column)); columns = builder.build(); builder.add(cellname.column); builder.add(metadata.compactValueColumn()); columns = builder.build(); filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(column_path.column), metadata.comparator), false);
// NOTE(review): orphaned statement fragment (byte-identical duplicate of the previous fragment;
// presumably two copies from a diff/merge artifact — verify and deduplicate in the full file).
// Mixes statements from a thrift single-column read path: selecting either a static column or
// one cell of the dynamic super-column map, then a by-name clustering filter from column_path.
// All referenced locals ('builder', 'columns', 'filter', etc.) are declared elsewhere.
builder.add(staticDef); builder.select(dynamicDef, CellPath.create(column_path.column)); columns = builder.build(); builder.add(cellname.column); builder.add(metadata.compactValueColumn()); columns = builder.build(); filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(Clustering.make(column_path.column), metadata.comparator), false);
// NOTE(review): orphaned statement fragment — looks like pieces of a CQL restriction-to-filter
// conversion for a compact/super table: slicing the compact value column between bound marker
// values (null bound -> CellPath.BOTTOM/TOP, i.e. unbounded) and selecting individual cells by
// value. The enclosing method, and the declarations of 'builder', 'slice', 'queryOptions',
// 'start' and 'value', are not visible in this chunk; the trailing 'return' belongs to that
// hidden method. Confirm against the full file before restructuring.
builder.add(cfm.compactValueColumn()); ByteBuffer end = slice.hasBound(Bound.END) ? slice.bound(Bound.END).bindAndGet(queryOptions) : null; builder.slice(cfm.compactValueColumn(), start == null ? CellPath.BOTTOM : CellPath.create(start), end == null ? CellPath.TOP : CellPath.create(end)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); return builder.build();
// NOTE(review): orphaned statement fragment, byte-identical to the preceding line (apparent
// extraction/merge duplication — verify in the full file). Pieces of a CQL restriction-to-filter
// conversion: slice the compact value column between bound marker values (null -> unbounded
// BOTTOM/TOP) and select individual cells by value. 'builder', 'slice', 'queryOptions',
// 'start' and 'value' are declared in the hidden enclosing method.
builder.add(cfm.compactValueColumn()); ByteBuffer end = slice.hasBound(Bound.END) ? slice.bound(Bound.END).bindAndGet(queryOptions) : null; builder.slice(cfm.compactValueColumn(), start == null ? CellPath.BOTTOM : CellPath.create(start), end == null ? CellPath.TOP : CellPath.create(end)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); return builder.build();
// NOTE(review): third byte-identical copy of the same orphaned fragment (see notes on the first
// occurrence; likely diff/extraction residue — verify and deduplicate in the full file). Slices
// the compact value column between bound marker values and selects individual cells by value;
// all referenced locals are declared in the hidden enclosing method.
builder.add(cfm.compactValueColumn()); ByteBuffer end = slice.hasBound(Bound.END) ? slice.bound(Bound.END).bindAndGet(queryOptions) : null; builder.slice(cfm.compactValueColumn(), start == null ? CellPath.BOTTOM : CellPath.create(start), end == null ? CellPath.TOP : CellPath.create(end)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); builder.select(cfm.compactValueColumn(), CellPath.create(value)); return builder.build();
// NOTE(review): truncated fragment — builds a ColumnFilter selecting a whole column when 'path'
// is null or a single cell otherwise, creates a single-partition read at 'nowInSec', and opens a
// try-with-resources whose body is cut off at this line boundary. 'column', 'path', 'cfs',
// 'nowInSec', 'key' and 'filter' come from the hidden enclosing method; cannot be safely
// restructured without the rest of the statement.
ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); if (path == null) builder.add(column); else builder.select(column, path); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key, builder.build(), filter); try (ReadExecutionController controller = cmd.executionController(); RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec))
// NOTE(review): truncated fragment, byte-identical to the preceding line (apparent duplication —
// verify in the full file). Selects a whole column or one cell depending on 'path', issues a
// single-partition read, and opens a try-with-resources whose body is missing from this chunk.
// All referenced locals come from the hidden enclosing method.
ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); if (path == null) builder.add(column); else builder.select(column, path); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key, builder.build(), filter); try (ReadExecutionController controller = cmd.executionController(); RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec))
// NOTE(review): third byte-identical copy of the same truncated fragment (see notes on the first
// occurrence; likely extraction residue — verify and deduplicate in the full file). The
// try-with-resources body is cut off at this line boundary, so the fragment cannot be
// restructured in isolation.
ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); if (path == null) builder.add(column); else builder.select(column, path); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key, builder.build(), filter); try (ReadExecutionController controller = cmd.executionController(); RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec))
private ColumnFilter makeColumnFilter(CFMetaData metadata, ColumnParent parent, SliceRange range) { if (metadata.isSuper() && parent.isSetSuper_column()) { // We want a slice of the dynamic columns ColumnFilter.Builder builder = ColumnFilter.selectionBuilder(); ColumnDefinition def = metadata.compactValueColumn(); ByteBuffer start = range.reversed ? range.finish : range.start; ByteBuffer finish = range.reversed ? range.start : range.finish; builder.slice(def, start.hasRemaining() ? CellPath.create(start) : CellPath.BOTTOM, finish.hasRemaining() ? CellPath.create(finish) : CellPath.TOP); if (metadata.isDense()) return builder.build(); // We also want to add any staticly defined column if it's within the range AbstractType<?> cmp = metadata.thriftColumnNameType(); for (ColumnDefinition column : metadata.partitionColumns()) { if (SuperColumnCompatibility.isSuperColumnMapColumn(column)) continue; ByteBuffer name = column.name.bytes; if (cmp.compare(name, start) < 0 || cmp.compare(finish, name) > 0) continue; builder.add(column); } return builder.build(); } return makeColumnFilter(metadata, makeSlices(metadata, range)); }
/**
 * Reads the current values of the counters referenced by the given marks from the table and
 * records them on the marks (the classic counter read-before-write).
 *
 * Only the clusterings and (column, path) pairs actually referenced by the marks are read.
 * Iteration stops early once every mark has been consumed.
 *
 * @param marks the counter marks of this mutation, in clustering order
 * @param cfs   the store to read the current counter values from
 */
private void updateWithCurrentValuesFromCFS(List<PartitionUpdate.CounterMark> marks, ColumnFamilyStore cfs)
{
    // Gather exactly what the marks touch: clusterings for the names filter, columns/cells
    // for the column filter. The static clustering is handled via the partition's static row.
    ColumnFilter.Builder columnBuilder = ColumnFilter.selectionBuilder();
    BTreeSet.Builder<Clustering> clusteringBuilder = BTreeSet.builder(cfs.metadata.comparator);
    for (PartitionUpdate.CounterMark mark : marks)
    {
        if (mark.clustering() != Clustering.STATIC_CLUSTERING)
            clusteringBuilder.add(mark.clustering());

        if (mark.path() == null)
            columnBuilder.add(mark.column());
        else
            columnBuilder.select(mark.column(), mark.path());
    }

    int nowInSec = FBUtilities.nowInSeconds();
    ClusteringIndexNamesFilter namesFilter = new ClusteringIndexNamesFilter(clusteringBuilder.build(), false);
    SinglePartitionReadCommand readCommand =
        SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key(), columnBuilder.build(), namesFilter);

    PeekingIterator<PartitionUpdate.CounterMark> pendingMarks = Iterators.peekingIterator(marks.iterator());
    try (ReadExecutionController controller = readCommand.executionController();
         RowIterator rows = UnfilteredRowIterators.filter(readCommand.queryMemtableAndDisk(cfs, controller), nowInSec))
    {
        // Static row first, then each clustered row; bail out as soon as no marks remain.
        updateForRow(pendingMarks, rows.staticRow(), cfs);
        while (rows.hasNext() && pendingMarks.hasNext())
            updateForRow(pendingMarks, rows.next(), cfs);
    }
}
/**
 * Reads the current values of the counters referenced by the given marks from the table and
 * records them on the marks (counter read-before-write).
 *
 * NOTE(review): byte-identical duplicate of the previous definition in this chunk — a class
 * cannot contain both; verify which copy the full file actually keeps.
 *
 * @param marks the counter marks of this mutation, in clustering order
 * @param cfs   the store to read the current counter values from
 */
private void updateWithCurrentValuesFromCFS(List<PartitionUpdate.CounterMark> marks, ColumnFamilyStore cfs)
{
    // Restrict the read to exactly the clusterings and (column, path) pairs the marks touch.
    ColumnFilter.Builder selection = ColumnFilter.selectionBuilder();
    BTreeSet.Builder<Clustering> requestedClusterings = BTreeSet.builder(cfs.metadata.comparator);
    for (PartitionUpdate.CounterMark mark : marks)
    {
        if (mark.clustering() != Clustering.STATIC_CLUSTERING)
            requestedClusterings.add(mark.clustering());

        if (mark.path() == null)
            selection.add(mark.column());
        else
            selection.select(mark.column(), mark.path());
    }

    int nowInSec = FBUtilities.nowInSeconds();
    ClusteringIndexNamesFilter byName = new ClusteringIndexNamesFilter(requestedClusterings.build(), false);
    SinglePartitionReadCommand read =
        SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key(), selection.build(), byName);

    PeekingIterator<PartitionUpdate.CounterMark> remaining = Iterators.peekingIterator(marks.iterator());
    try (ReadExecutionController controller = read.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(read.queryMemtableAndDisk(cfs, controller), nowInSec))
    {
        // Static row first, then clustered rows; stop as soon as every mark is consumed.
        updateForRow(remaining, partition.staticRow(), cfs);
        while (partition.hasNext() && remaining.hasNext())
            updateForRow(remaining, partition.next(), cfs);
    }
}
/**
 * Reads the current values of the counters referenced by the given marks from the table and
 * records them on the marks (counter read-before-write).
 *
 * NOTE(review): third byte-identical copy of this definition in this chunk — verify and
 * deduplicate in the full file.
 *
 * @param marks the counter marks of this mutation, in clustering order
 * @param cfs   the store to read the current counter values from
 */
private void updateWithCurrentValuesFromCFS(List<PartitionUpdate.CounterMark> marks, ColumnFamilyStore cfs)
{
    // Build the narrowest possible read: only marked clusterings and marked columns/cells.
    ColumnFilter.Builder wanted = ColumnFilter.selectionBuilder();
    BTreeSet.Builder<Clustering> keys = BTreeSet.builder(cfs.metadata.comparator);
    for (PartitionUpdate.CounterMark mark : marks)
    {
        if (mark.clustering() != Clustering.STATIC_CLUSTERING)
            keys.add(mark.clustering());

        if (mark.path() == null)
            wanted.add(mark.column());
        else
            wanted.select(mark.column(), mark.path());
    }

    int nowInSec = FBUtilities.nowInSeconds();
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(
        cfs.metadata, nowInSec, key(), wanted.build(),
        new ClusteringIndexNamesFilter(keys.build(), false));

    PeekingIterator<PartitionUpdate.CounterMark> cursor = Iterators.peekingIterator(marks.iterator());
    try (ReadExecutionController controller = command.executionController();
         RowIterator result = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(cfs, controller), nowInSec))
    {
        // Static row first, then clustered rows; stop once all marks are consumed.
        updateForRow(cursor, result.staticRow(), cfs);
        while (result.hasNext() && cursor.hasNext())
            updateForRow(cursor, result.next(), cfs);
    }
}
/** * Creates a PartitionUpdate from a partition containing some schema table content. * This is mainly calling {@code PartitionUpdate.fromIterator} except for the fact that it deals with * the problem described in #12236. */ private static PartitionUpdate makeUpdateForSchema(UnfilteredRowIterator partition, ColumnFilter filter) { // This method is used during schema migration tasks, and if cdc is disabled, we want to force excluding the // 'cdc' column from the TABLES/VIEWS schema table because it is problematic if received by older nodes (see #12236 // and #12697). Otherwise though, we just simply "buffer" the content of the partition into a PartitionUpdate. if (DatabaseDescriptor.isCDCEnabled() || !TABLES_WITH_CDC_ADDED.contains(partition.metadata().cfName)) return PartitionUpdate.fromIterator(partition, filter); // We want to skip the 'cdc' column. A simple solution for that is based on the fact that // 'PartitionUpdate.fromIterator()' will ignore any columns that are marked as 'fetched' but not 'queried'. ColumnFilter.Builder builder = ColumnFilter.allColumnsBuilder(partition.metadata()); for (ColumnDefinition column : filter.fetchedColumns()) { if (!column.name.toString().equals("cdc")) builder.add(column); } return PartitionUpdate.fromIterator(partition, builder.build()); }
/** * Creates a PartitionUpdate from a partition containing some schema table content. * This is mainly calling {@code PartitionUpdate.fromIterator} except for the fact that it deals with * the problem described in #12236. */ private static PartitionUpdate makeUpdateForSchema(UnfilteredRowIterator partition, ColumnFilter filter) { // This method is used during schema migration tasks, and if cdc is disabled, we want to force excluding the // 'cdc' column from the TABLES/VIEWS schema table because it is problematic if received by older nodes (see #12236 // and #12697). Otherwise though, we just simply "buffer" the content of the partition into a PartitionUpdate. if (DatabaseDescriptor.isCDCEnabled() || !TABLES_WITH_CDC_ADDED.contains(partition.metadata().cfName)) return PartitionUpdate.fromIterator(partition, filter); // We want to skip the 'cdc' column. A simple solution for that is based on the fact that // 'PartitionUpdate.fromIterator()' will ignore any columns that are marked as 'fetched' but not 'queried'. ColumnFilter.Builder builder = ColumnFilter.allColumnsBuilder(partition.metadata()); for (ColumnDefinition column : filter.fetchedColumns()) { if (!column.name.toString().equals("cdc")) builder.add(column); } return PartitionUpdate.fromIterator(partition, builder.build()); }