private static Row emptyRow(Clustering clustering, DeletionTime deletion) { // Returning null for an empty row is slightly ugly, but the case where there is no pre-existing row is fairly common // (especially when building the view), so we want to avoid a dummy allocation of an empty row every time. // And MultiViewUpdateBuilder knows how to deal with that. return deletion.isLive() ? null : BTreeRow.emptyDeletedRow(clustering, Row.Deletion.regular(deletion)); }
/**
 * Creates a row carrying only the provided deletion, or null when there is nothing to carry.
 *
 * Returning null for an empty row is slightly ugly, but the case where there is no pre-existing row
 * is fairly common (especially when building the view), so we want to avoid a dummy allocation of an
 * empty row every time. And MultiViewUpdateBuilder knows how to deal with that.
 *
 * @param clustering the clustering for the created row.
 * @param deletion the deletion the row should carry.
 * @return an empty row whose deletion is {@code deletion}, or {@code null} if {@code deletion} is live.
 */
private static Row emptyRow(Clustering clustering, DeletionTime deletion)
{
    return deletion.isLive() ? null : BTreeRow.emptyDeletedRow(clustering, Row.Deletion.regular(deletion));
}
private static Row emptyRow(Clustering clustering, DeletionTime deletion) { // Returning null for an empty row is slightly ugly, but the case where there is no pre-existing row is fairly common // (especially when building the view), so we want to avoid a dummy allocation of an empty row every time. // And MultiViewUpdateBuilder knows how to deal with that. return deletion.isLive() ? null : BTreeRow.emptyDeletedRow(clustering, Row.Deletion.regular(deletion)); }
// Deletes the index entry at (indexKey, indexClustering) by applying a row tombstone to the index table.
private void doDelete(DecoratedKey indexKey, Clustering indexClustering, DeletionTime deletion, OpOrder.Group opGroup)
{
    Row tombstoneRow = BTreeRow.emptyDeletedRow(indexClustering, Row.Deletion.regular(deletion));
    indexCfs.apply(partitionUpdate(indexKey, tombstoneRow), UpdateTransaction.NO_OP, opGroup, null);
    logger.trace("Removed index entry for value {}", indexKey);
}
/**
 * Removes an index entry by applying a row-level tombstone to the backing index table.
 *
 * @param indexKey the partition key of the index entry to remove.
 * @param indexClustering the clustering of the index entry to remove.
 * @param deletion the deletion time to record for the removal.
 * @param opGroup the operation group guarding the write.
 */
private void doDelete(DecoratedKey indexKey, Clustering indexClustering, DeletionTime deletion, OpOrder.Group opGroup)
{
    // An empty row carrying only a row deletion: no cells are needed to delete an entry.
    Row row = BTreeRow.emptyDeletedRow(indexClustering, Row.Deletion.regular(deletion));
    PartitionUpdate upd = partitionUpdate(indexKey, row);
    // NOTE(review): NO_OP presumably because the index table is not itself indexed — confirm.
    indexCfs.apply(upd, UpdateTransaction.NO_OP, opGroup, null);
    logger.trace("Removed index entry for value {}", indexKey);
}
private void doDelete(DecoratedKey indexKey, Clustering indexClustering, DeletionTime deletion, OpOrder.Group opGroup)
{
    // Build an empty row whose only payload is the row deletion, wrap it in a partition update
    // for the index partition, and apply it to the index table.
    Row.Deletion rowDeletion = Row.Deletion.regular(deletion);
    Row deletedRow = BTreeRow.emptyDeletedRow(indexClustering, rowDeletion);
    PartitionUpdate update = partitionUpdate(indexKey, deletedRow);
    indexCfs.apply(update, UpdateTransaction.NO_OP, opGroup, null);
    logger.trace("Removed index entry for value {}", indexKey);
}
// Reads the static row but retains only its deletion; live content is replaced by the empty static row.
public Row readStaticRow() throws IOException
{
    Row row = super.readStaticRow();
    return row.deletion().isLive()
         ? Rows.EMPTY_STATIC_ROW
         : BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
}
/**
 * Reads the static row, keeping only its deletion: a non-live deletion is returned as an empty
 * deleted row, while any live content is discarded in favor of {@code Rows.EMPTY_STATIC_ROW}.
 *
 * @return an empty deleted static row if the underlying static row is deleted, the empty static row otherwise.
 * @throws IOException if the underlying read fails.
 */
public Row readStaticRow() throws IOException
{
    Row row = super.readStaticRow();
    if (!row.deletion().isLive())
        return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
    // Live content is intentionally dropped; only the deletion matters to this reader.
    return Rows.EMPTY_STATIC_ROW;
}
public Row readStaticRow() throws IOException
{
    // Delegate the actual read, then strip the result down to its deletion (if any).
    Row staticRow = super.readStaticRow();
    Row.Deletion rowDeletion = staticRow.deletion();
    if (rowDeletion.isLive())
        return Rows.EMPTY_STATIC_ROW;

    return BTreeRow.emptyDeletedRow(staticRow.clustering(), rowDeletion);
}
/**
 * Reads the static row from the wrapped reader and reduces it to deletion information only.
 *
 * @return an empty row carrying the static row's deletion when that deletion is not live;
 *         {@code Rows.EMPTY_STATIC_ROW} otherwise (any live content is discarded).
 * @throws IOException if the underlying read fails.
 */
public Row readStaticRow() throws IOException
{
    Row row = super.readStaticRow();
    if (!row.deletion().isLive())
        return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
    return Rows.EMPTY_STATIC_ROW;
}
// Deserializes the static row (when present) but keeps only its deletion; content is discarded.
public Row readStaticRow() throws IOException
{
    if (!header.hasStatic())
        return Rows.EMPTY_STATIC_ROW;

    Row staticRow = UnfilteredSerializer.serializer.deserializeStaticRow(in, header, helper);
    return staticRow.deletion().isLive()
         ? Rows.EMPTY_STATIC_ROW
         : BTreeRow.emptyDeletedRow(staticRow.clustering(), staticRow.deletion());
}
/**
 * Reads the serialized static row (if the header says one exists) and reduces it to its deletion.
 *
 * Note that the static row is fully deserialized even though its content is discarded: the read
 * advances {@code in} past the serialized row.
 *
 * @return an empty deleted row if the static row carries a non-live deletion,
 *         {@code Rows.EMPTY_STATIC_ROW} otherwise (including when no static row is present).
 * @throws IOException if deserialization fails.
 */
public Row readStaticRow() throws IOException
{
    if (header.hasStatic())
    {
        Row staticRow = UnfilteredSerializer.serializer.deserializeStaticRow(in, header, helper);
        if (!staticRow.deletion().isLive())
            return BTreeRow.emptyDeletedRow(staticRow.clustering(), staticRow.deletion());
    }
    return Rows.EMPTY_STATIC_ROW;
}
public Row readStaticRow() throws IOException
{
    // No static row in this serialization: nothing to read.
    if (!header.hasStatic())
        return Rows.EMPTY_STATIC_ROW;

    // Deserialize to advance the stream, then keep only the deletion.
    Row deserialized = UnfilteredSerializer.serializer.deserializeStaticRow(in, header, helper);
    Row.Deletion rowDeletion = deserialized.deletion();
    if (rowDeletion.isLive())
        return Rows.EMPTY_STATIC_ROW;

    return BTreeRow.emptyDeletedRow(deserialized.clustering(), rowDeletion);
}
protected Unfiltered computeNext() { while (true) { Unfiltered unfiltered = super.computeNext(); if (unfiltered == null || unfiltered.isRangeTombstoneMarker()) return unfiltered; Row row = (Row) unfiltered; if (!row.deletion().isLive()) return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion()); // Otherwise read next. } }
/**
 * Reads the serialized static row (if present per the header) and keeps only deletion information.
 *
 * @return an empty deleted row carrying the static row's deletion when it is not live,
 *         {@code Rows.EMPTY_STATIC_ROW} otherwise (including when the header has no static row).
 * @throws IOException if deserialization fails.
 */
public Row readStaticRow() throws IOException
{
    if (header.hasStatic())
    {
        // The row must be deserialized (advancing 'in') even though its content is dropped.
        Row staticRow = UnfilteredSerializer.serializer.deserializeStaticRow(in, header, helper);
        if (!staticRow.deletion().isLive())
            return BTreeRow.emptyDeletedRow(staticRow.clustering(), staticRow.deletion());
    }
    return Rows.EMPTY_STATIC_ROW;
}
/**
 * Returns the next unfiltered, stripped down to tombstone information: range tombstone markers
 * (and end-of-data {@code null}) pass through unchanged, rows with a non-live row deletion are
 * reduced to empty deleted rows, and rows whose row deletion is live are skipped entirely.
 */
protected Unfiltered computeNext()
{
    while (true)
    {
        Unfiltered unfiltered = super.computeNext();
        // End of data or a range tombstone marker: return as-is.
        if (unfiltered == null || unfiltered.isRangeTombstoneMarker())
            return unfiltered;
        Row row = (Row) unfiltered;
        // Keep only the row's deletion; its cells are dropped.
        if (!row.deletion().isLive())
            return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
        // Otherwise read next.
    }
}
protected Unfiltered computeNext() { while (true) { Unfiltered unfiltered = super.computeNext(); if (unfiltered == null || unfiltered.isRangeTombstoneMarker()) return unfiltered; Row row = (Row) unfiltered; if (!row.deletion().isLive()) return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion()); // Otherwise read next. } }
/**
 * Computes the next tombstone-only unfiltered from the wrapped iterator.
 *
 * Range tombstone markers and exhaustion ({@code null}) are forwarded unchanged. A row with a
 * non-live row deletion is replaced by an empty row carrying just that deletion; a row whose row
 * deletion is live is skipped and the scan continues.
 */
protected Unfiltered computeNext()
{
    while (true)
    {
        Unfiltered unfiltered = super.computeNext();
        if (unfiltered == null || unfiltered.isRangeTombstoneMarker())
            return unfiltered;
        Row row = (Row) unfiltered;
        if (!row.deletion().isLive())
            return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
        // Otherwise read next.
    }
}
/**
 * Returns the row for {@code clustering} with any covering deletion (partition deletion or a
 * covering range tombstone) folded into the row's own deletion, or {@code null} when there is no
 * row and no deletion covering the clustering.
 */
public Row next(Clustering clustering)
{
    if (clustering == Clustering.STATIC_CLUSTERING)
        return staticRow(current, columns, true);
    Row row = rawIter.next(clustering);
    RangeTombstone rt = current.deletionInfo.rangeCovering(clustering);
    // A search iterator only return a row, so it doesn't allow to directly account for deletion that should apply to to row
    // (the partition deletion or the deletion of a range tombstone that covers it). So if needs be, reuse the row deletion
    // to carry the proper deletion on the row.
    DeletionTime activeDeletion = partitionDeletion;
    if (rt != null && rt.deletionTime().supersedes(activeDeletion))
        activeDeletion = rt.deletionTime();
    if (row == null)
        return activeDeletion.isLive() ? null : BTreeRow.emptyDeletedRow(clustering, Row.Deletion.regular(activeDeletion));
    // filter() applies the active deletion and restricts the row to the selected columns.
    return row.filter(columns, activeDeletion, true, metadata);
}
};
public Row next(Clustering clustering) { if (clustering == Clustering.STATIC_CLUSTERING) return staticRow(current, columns, true); Row row = rawIter.next(clustering); RangeTombstone rt = current.deletionInfo.rangeCovering(clustering); // A search iterator only return a row, so it doesn't allow to directly account for deletion that should apply to to row // (the partition deletion or the deletion of a range tombstone that covers it). So if needs be, reuse the row deletion // to carry the proper deletion on the row. DeletionTime activeDeletion = partitionDeletion; if (rt != null && rt.deletionTime().supersedes(activeDeletion)) activeDeletion = rt.deletionTime(); if (row == null) return activeDeletion.isLive() ? null : BTreeRow.emptyDeletedRow(clustering, Row.Deletion.regular(activeDeletion)); return row.filter(columns, activeDeletion, true, metadata); } };