/**
 * Returns an iterator over only the simple-column cells of {@code row},
 * in the order {@code row.cells()} yields them.
 */
private Iterator<Cell> simpleCellsIterator(Row row)
{
    final Iterator<Cell> cells = row.cells().iterator();
    return new AbstractIterator<Cell>()
    {
        protected Cell computeNext()
        {
            // Skip complex-column cells rather than stopping at the first one:
            // the previous 'if' returned endOfData() as soon as a non-simple cell
            // was seen, which drops any simple cell sorted after a complex one.
            // (If all simple cells always precede complex ones, behavior is identical.)
            while (cells.hasNext())
            {
                Cell cell = cells.next();
                if (cell.column().isSimple())
                    return cell;
            }
            return endOfData();
        }
    };
}
}
/**
 * Reconciles an existing row with an incoming update, notifies the indexer,
 * and updates this updater's size/time accounting.
 *
 * Side effects (order matters): updates {@code colUpdateTimeDelta},
 * {@code dataSize}, {@code heapSize} and appends to {@code inserted}.
 *
 * @param existing the row currently present
 * @param update   the incoming row to merge into it
 * @return the reconciled (merged) row
 */
public Row apply(Row existing, Row update)
{
    Row.Builder builder = builder(existing.clustering());
    // Rows.merge writes the merged row into builder; its return value is folded
    // into colUpdateTimeDelta via min() — presumably the smallest column-update
    // timestamp delta observed so far (TODO confirm against Rows.merge contract).
    colUpdateTimeDelta = Math.min(colUpdateTimeDelta, Rows.merge(existing, update, builder, nowInSec));
    Row reconciled = builder.build();
    // Tell the index about the old -> new transition before accounting.
    indexer.onUpdated(existing, reconciled);
    // Track how much the partition grew (or shrank) on-heap and in data size.
    dataSize += reconciled.dataSize() - existing.dataSize();
    heapSize += reconciled.unsharedHeapSizeExcludingData() - existing.unsharedHeapSizeExcludingData();
    // Lazily allocate the list of reconciled rows produced by this update pass.
    if (inserted == null)
        inserted = new ArrayList<>();
    inserted.add(reconciled);
    return reconciled;
}
/**
 * Collects a new/old row pair for later indexing: static rows are stashed in
 * {@code inStaticRow}/{@code outStaticRow}, clustered rows are queued as
 * {@code WideRowcument}s keyed by clustering.
 *
 * NOTE(review): the trace block guards against {@code inRow == null}, but the
 * main path dereferences {@code inRow} unconditionally — if inRow can really be
 * null, that NPE is silently swallowed by the catch below. Confirm the intended
 * null contract with callers.
 */
@Override
public void collect(Row inRow, Row outRow)
{
    try
    {
        if (logger.isTraceEnabled())
        {
            if (inRow != null)
                logger.trace("indexer={} newRowData={} clustering={} static={} hasLiveData={}",
                             WideRowcumentIndexer.this.hashCode(), inRow, inRow.clustering(), inRow.isStatic(),
                             inRow.hasLiveData(nowInSec, baseCfs.metadata.enforceStrictLiveness()));
            if (outRow != null)
                logger.trace("indexer={} oldRowData={} clustering={} static={} hasLiveData={}",
                             WideRowcumentIndexer.this.hashCode(), outRow, outRow.clustering(), outRow.isStatic(),
                             outRow.hasLiveData(nowInSec, baseCfs.metadata.enforceStrictLiveness()));
        }
        if (inRow.isStatic())
        {
            // Remember the static row pair for the partition-level document.
            inStaticRow = inRow;
            outStaticRow = outRow;
        }
        else
        {
            // Queue the clustered row pair; clusterings preserves visit order.
            clusterings.add(inRow.clustering());
            rowcuments.put(inRow.clustering(), new WideRowcument(inRow, outRow));
        }
    }
    catch(Throwable t)
    {
        // NOTE(review): broad catch deliberately keeps indexing failures from
        // propagating into the write path, but it hides bugs (including the
        // possible NPE above) — consider narrowing.
        logger.error("Unexpected error", t);
    }
}
/**
 * Reads the static row but keeps only its deletion information: a deleted
 * static row is reduced to an empty deleted row, and a live one to
 * {@code Rows.EMPTY_STATIC_ROW}.
 *
 * NOTE(review): all cell data of the static row is intentionally discarded —
 * presumably this reader only cares about deletions; confirm against callers.
 *
 * @return an empty row carrying at most the static deletion
 * @throws IOException on deserialization failure in {@code super.readStaticRow()}
 */
public Row readStaticRow() throws IOException
{
    Row row = super.readStaticRow();
    if (!row.deletion().isLive())
        return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
    return Rows.EMPTY_STATIC_ROW;
}
int nowInSec) Clustering clustering = existing.clustering(); builder.newRow(clustering); LivenessInfo existingInfo = existing.primaryKeyLivenessInfo(); LivenessInfo updateInfo = update.primaryKeyLivenessInfo(); LivenessInfo mergedInfo = existingInfo.supersedes(updateInfo) ? existingInfo : updateInfo; Row.Deletion rowDeletion = existing.deletion().supersedes(update.deletion()) ? existing.deletion() : update.deletion(); Iterator<ColumnData> a = existing.iterator(); Iterator<ColumnData> b = update.iterator(); ColumnData nexta = a.hasNext() ? a.next() : null, nextb = b.hasNext() ? b.next() : null; while (nexta != null | nextb != null)
throws IOException boolean isStatic = row.isStatic(); LivenessInfo pkLiveness = row.primaryKeyLivenessInfo(); Row.Deletion deletion = row.deletion(); row.apply(cd -> {
assert !row.isEmpty(); collector.update(row.primaryKeyLivenessInfo()); collector.update(row.deletion().time()); final WrappedInt cellCount = new WrappedInt(0); row.apply(cd -> { if (cd.column().isSimple())
/**
 * Two rows are equal when their clustering, primary-key liveness info and row
 * deletion all match, and they contain equal column data in the same order.
 */
@Override
public boolean equals(Object other)
{
    if (!(other instanceof Row))
        return false;

    Row that = (Row) other;
    // Compare the row "header" first; only walk the column data if it matches.
    boolean sameHeader = this.clustering().equals(that.clustering())
                      && this.primaryKeyLivenessInfo().equals(that.primaryKeyLivenessInfo())
                      && this.deletion().equals(that.deletion());
    return sameHeader && Iterables.elementsEqual(this, that);
}
if (row.isEmpty()) return; if (row.isStatic()) assert columns().statics.containsAll(row.columns()) : columns().statics + " is not superset of " + row.columns(); Row staticRow = holder.staticRow.isEmpty() ? row : Rows.merge(holder.staticRow, row, createdAtInSec); assert columns().regulars.containsAll(row.columns()) : columns().regulars + " is not superset of " + row.columns(); rowBuilder.add(row);
continue; if (row.primaryKeyLivenessInfo().supersedes(rowInfo)) rowInfo = row.primaryKeyLivenessInfo(); if (row.deletion().supersedes(rowDeletion)) rowDeletion = row.deletion(); columnDataIterators.add(row == null ? Collections.emptyIterator() : row.iterator());
inRowDataSize = inRow != null ? inRow.dataSize() : 0; Row row = inRow != null ? inRow : outRow; this.isStatic = row.isStatic(); this.hasLiveData = inRow != null && inRow.hasLiveData(nowInSec, baseCfs.metadata.enforceStrictLiveness()); if (!row.isStatic() && row.clustering().size() > 0) { int i=0; for(ColumnDefinition ccd : baseCfs.metadata.clusteringColumns()) { Object value = ClusterService.deserialize(ccd.type, row.clustering().get(i)); pkCols[baseCfs.metadata.partitionKeyColumns().size()+i] = value; if (indexedPkColumns[baseCfs.metadata.partitionKeyColumns().size()+i])
/**
 * Builds the paging-state marker for {@code row}, using the legacy cellname
 * encoding for protocol versions up to V3 and the 3.0 clustering serialization
 * otherwise.
 *
 * @param metadata        table metadata used for encoding
 * @param row             the last row returned to the client
 * @param protocolVersion the native protocol version of the paging client
 * @return the serialized row mark for the paging state
 */
public static RowMark create(CFMetaData metadata, Row row, ProtocolVersion protocolVersion)
{
    ByteBuffer mark;
    if (protocolVersion.isSmallerOrEqualTo(ProtocolVersion.V3))
    {
        // We need to be backward compatible with 2.1/2.2 nodes paging states. Which means we have to send
        // the full cellname of the "last" cell in the row we get (since that's how 2.1/2.2 nodes will start after
        // that last row if they get that paging state).
        Iterator<Cell> cells = row.cellsInLegacyOrder(metadata, true).iterator();
        if (!cells.hasNext())
        {
            // If the last returned row has no cell, this means in 2.1/2.2 terms that we stopped on the row
            // marker. Note that this shouldn't happen if the table is COMPACT.
            assert !metadata.isCompactTable();
            mark = LegacyLayout.encodeCellName(metadata, row.clustering(), ByteBufferUtil.EMPTY_BYTE_BUFFER, null);
        }
        else
        {
            Cell cell = cells.next();
            mark = LegacyLayout.encodeCellName(metadata, row.clustering(), cell.column().name.bytes,
                                               cell.column().isComplex() ? cell.path().get(0) : null);
        }
    }
    else
    {
        // We froze the serialization version to 3.0 as we need to make sure this doesn't change (that is, it has to be
        // fixed for a given version of the protocol).
        mark = Clustering.serializer.serialize(row.clustering(), MessagingService.VERSION_30, makeClusteringTypes(metadata));
    }
    return new RowMark(mark, protocolVersion);
}
/**
 * An index entry is stale once the backing row no longer carries any live data
 * at {@code nowInSec}; the indexed value itself is not consulted.
 */
public boolean isStale(Row data, ByteBuffer indexValue, int nowInSec)
{
    boolean live = data.hasLiveData(nowInSec, enforceStrictLiveness);
    return !live;
}
}
private long serializedSize(Row row, SerializationHeader header, long previousUnfilteredSize, int version) { long size = 1; // flags if (hasExtendedFlags(row)) size += 1; // extended flags if (!row.isStatic()) size += Clustering.serializer.serializedSize(row.clustering(), version, header.clusteringTypes()); return size + serializedRowBodySize(row, header, previousUnfilteredSize, version); }
/**
 * Builds an empty partition iterator carrying only a static row and/or a
 * partition-level deletion; empty/live inputs are canonicalized to the shared
 * {@code Rows.EMPTY_STATIC_ROW} and {@code DeletionTime.LIVE} singletons.
 */
public static UnfilteredRowIterator unfilteredRow(CFMetaData metadata, DecoratedKey partitionKey, boolean isReverseOrder, Row staticRow, DeletionTime partitionDeletion)
{
    PartitionColumns columns;
    if (staticRow.isEmpty())
    {
        staticRow = Rows.EMPTY_STATIC_ROW;
        columns = PartitionColumns.NONE;
    }
    else
    {
        // Only static columns are present: regulars are empty by construction.
        columns = new PartitionColumns(Columns.from(staticRow.columns()), Columns.NONE);
    }

    if (partitionDeletion.isLive())
        partitionDeletion = DeletionTime.LIVE;

    return new EmptyUnfilteredRowIterator(columns, metadata, partitionKey, isReverseOrder, staticRow, partitionDeletion);
}
/**
 * Derives the liveness info to index for {@code row}: starts from the row's
 * primary-key liveness and lets any live cell with a newer timestamp take over
 * the timestamp (and its TTL).
 */
private LivenessInfo getPrimaryKeyIndexLiveness(Row row)
{
    LivenessInfo pkInfo = row.primaryKeyLivenessInfo();
    long timestamp = pkInfo.timestamp();
    int ttl = pkInfo.ttl();
    for (Cell cell : row.cells())
    {
        // Dead cells never influence the indexed liveness.
        if (!cell.isLive(nowInSec))
            continue;
        long cellTimestamp = cell.timestamp();
        if (cellTimestamp > timestamp)
        {
            timestamp = cellTimestamp;
            ttl = cell.ttl();
        }
    }
    return LivenessInfo.create(timestamp, ttl, nowInSec);
}
};
/**
 * Creates an immutable partition update that contains a single row update.
 *
 * @param metadata the metadata for the created update.
 * @param key the partition key for the partition to update.
 * @param row the row for the update.
 *
 * @return the newly created partition update containing only {@code row}.
 */
public static PartitionUpdate singleRowUpdate(CFMetaData metadata, DecoratedKey key, Row row)
{
    MutableDeletionInfo deletionInfo = MutableDeletionInfo.live();
    Holder holder;
    if (row.isStatic())
    {
        // Static-only update: the row becomes the static row; no clustered rows.
        holder = new Holder(new PartitionColumns(Columns.from(row.columns()), Columns.NONE),
                            BTree.empty(), deletionInfo, row, EncodingStats.NO_STATS);
    }
    else
    {
        // Regular update: a single clustered row and an empty static row.
        holder = new Holder(new PartitionColumns(Columns.NONE, Columns.from(row.columns())),
                            BTree.singleton(row), deletionInfo, Rows.EMPTY_STATIC_ROW, EncodingStats.NO_STATS);
    }
    return new PartitionUpdate(metadata, key, holder, deletionInfo, false);
}
/**
 * Serializes a static row; delegates to the generic row serializer.
 *
 * @throws IOException if writing to {@code out} fails
 */
public void serializeStaticRow(Row row, SerializationHeader header, DataOutputPlus out, int version) throws IOException
{
    assert row.isStatic();
    // The 0 is presumably the previous-unfiltered size (none precedes the
    // static row) — confirm against serialize()'s parameter list.
    serialize(row, header, out, 0, version);
}
/**
 * Records the clustering of every row that flows through this transformation
 * and passes the row along unmodified.
 */
@Override
public Row applyToRow(Row row)
{
    // Track the most recently seen clustering; the row itself is untouched.
    lastRowClustering = row.clustering();
    return row;
}