/**
 * Compares the clustering of the next atom against the given bound using the
 * table's clustering comparator.
 *
 * @param bound the clustering bound to compare the next atom to
 * @return a negative, zero, or positive integer per the comparator contract
 * @throws IOException if advancing the underlying iterator fails
 * @throws IllegalStateException if the iterator is exhausted
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (!hasNext())
        // A message makes the failure diagnosable; the bare IllegalStateException gave no context.
        throw new IllegalStateException("No next atom to compare: iterator is exhausted");
    return metadata.comparator.compare(next.clustering(), bound);
}
/**
 * Compares the clustering of the next atom against the given bound using the
 * table's clustering comparator.
 *
 * @param bound the clustering bound to compare the next atom to
 * @return a negative, zero, or positive integer per the comparator contract
 * @throws IOException if advancing the underlying iterator fails
 * @throws IllegalStateException if the iterator is exhausted
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (!hasNext())
        // A message makes the failure diagnosable; the bare IllegalStateException gave no context.
        throw new IllegalStateException("No next atom to compare: iterator is exhausted");
    return metadata.comparator.compare(next.clustering(), bound);
}
/**
 * Compares the clustering of the next atom against the given bound using the
 * table's clustering comparator.
 *
 * @param bound the clustering bound to compare the next atom to
 * @return a negative, zero, or positive integer per the comparator contract
 * @throws IOException if advancing the underlying iterator fails
 * @throws IllegalStateException if the iterator is exhausted
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (!hasNext())
        // A message makes the failure diagnosable; the bare IllegalStateException gave no context.
        throw new IllegalStateException("No next atom to compare: iterator is exhausted");
    return metadata.comparator.compare(next.clustering(), bound);
}
/**
 * Compares the clustering of the next atom against the given bound using the
 * table's clustering comparator.
 *
 * @param bound the clustering bound to compare the next atom to
 * @return a negative, zero, or positive integer per the comparator contract
 * @throws IOException if advancing the underlying iterator fails
 * @throws IllegalStateException if the iterator is exhausted
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (!hasNext())
        // A message makes the failure diagnosable; the bare IllegalStateException gave no context.
        throw new IllegalStateException("No next atom to compare: iterator is exhausted");
    return metadata.comparator.compare(next.clustering(), bound);
}
@Override protected Unfiltered computeNext() { Unfiltered ret = super.computeNext(); if (firstItemRetrieved) return ret; // Check that the lower bound is not bigger than the first item retrieved firstItemRetrieved = true; if (lowerBound != null && ret != null) assert comparator().compare(lowerBound, ret.clustering()) <= 0 : String.format("Lower bound [%s ]is bigger than first returned value [%s] for sstable %s", lowerBound.toString(sstable.metadata), ret.toString(sstable.metadata), sstable.getFilename()); return ret; }
@Override protected Unfiltered computeNext() { Unfiltered ret = super.computeNext(); if (firstItemRetrieved) return ret; // Check that the lower bound is not bigger than the first item retrieved firstItemRetrieved = true; if (lowerBound != null && ret != null) assert comparator().compare(lowerBound, ret.clustering()) <= 0 : String.format("Lower bound [%s ]is bigger than first returned value [%s] for sstable %s", lowerBound.toString(sstable.metadata), ret.toString(sstable.metadata), sstable.getFilename()); return ret; }
@Override protected Unfiltered computeNext() { Unfiltered ret = super.computeNext(); if (firstItemRetrieved) return ret; // Check that the lower bound is not bigger than the first item retrieved firstItemRetrieved = true; if (lowerBound != null && ret != null) assert comparator().compare(lowerBound, ret.clustering()) <= 0 : String.format("Lower bound [%s ]is bigger than first returned value [%s] for sstable %s", lowerBound.toString(sstable.metadata), ret.toString(sstable.metadata), sstable.getFilename()); return ret; }
@Override protected Unfiltered computeNext() { Unfiltered ret = super.computeNext(); if (firstItemRetrieved) return ret; // Check that the lower bound is not bigger than the first item retrieved firstItemRetrieved = true; if (lowerBound != null && ret != null) assert comparator().compare(lowerBound, ret.clustering()) <= 0 : String.format("Lower bound [%s ]is bigger than first returned value [%s] for sstable %s", lowerBound.toString(sstable.metadata), ret.toString(sstable.metadata), sstable.getFilename()); return ret; }
@Override public Unfiltered next() { Unfiltered next = nextToOffer != null ? nextToOffer : wrapped.next(); if (next.isRow()) { while (wrapped.hasNext()) { Unfiltered peek = wrapped.next(); if (!peek.isRow() || !next.clustering().equals(peek.clustering())) { nextToOffer = peek; // Offer peek in next call return next; } // Duplicate row, merge it. next = Rows.merge((Row) next, (Row) peek, FBUtilities.nowInSeconds()); } } nextToOffer = null; return next; } }
@Override public Unfiltered next() { Unfiltered next = nextToOffer != null ? nextToOffer : wrapped.next(); if (next.isRow()) { while (wrapped.hasNext()) { Unfiltered peek = wrapped.next(); if (!peek.isRow() || !next.clustering().equals(peek.clustering())) { nextToOffer = peek; // Offer peek in next call return next; } // Duplicate row, merge it. next = Rows.merge((Row) next, (Row) peek, FBUtilities.nowInSeconds()); } } nextToOffer = null; return next; } }
public Collection<Mutation> augment(Partition update) { try { UnfilteredRowIterator it = update.unfilteredIterator(); while (it.hasNext()) { Unfiltered un = it.next(); Clustering clt = (Clustering) un.clustering(); Iterator<Cell> cls = update.getRow(clt).cells().iterator(); while(cls.hasNext()){ Cell cell = cls.next(); String data = new String(cell.value().array()); // If cell type is text } } } catch (Exception e) { ... } return null; }
@Override public Unfiltered next() { Unfiltered next = nextToOffer != null ? nextToOffer : wrapped.next(); if (next.isRow()) { while (wrapped.hasNext()) { Unfiltered peek = wrapped.next(); if (!peek.isRow() || !next.clustering().equals(peek.clustering())) { nextToOffer = peek; // Offer peek in next call return next; } // Duplicate row, merge it. next = Rows.merge((Row) next, (Row) peek, FBUtilities.nowInSeconds()); } } nextToOffer = null; return next; } }
/**
 * Appends one atom (row or range tombstone marker) to the data file and updates
 * the bookkeeping used to build the column index.
 *
 * The statement order matters: the position is captured before serializing,
 * and previousRowStart is only advanced afterwards so the serializer receives
 * the delta from the previous row.
 *
 * @param unfiltered the atom to write
 * @throws IOException on serialization/write failure
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // Serialize with the offset from the previous row's start (delta encoding).
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        // Track the currently-open range deletion so index blocks can carry it.
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}
// Iterate every atom of the partition. (Fragment: the loop's closing brace lies
// outside this view.)
while (it.hasNext()) {
    Unfiltered un = it.next();
    // NOTE(review): unconditional cast assumes every atom is a row; a range
    // tombstone marker here would throw ClassCastException — confirm that
    // upstream filtering guarantees rows only.
    Clustering clt = (Clustering) un.clustering();
    Iterator<Cell> cells = partition.getRow(clt).cells().iterator();
    Iterator<ColumnDefinition> columns = partition.getRow(clt).columns().iterator();
/**
 * Appends one atom (row or range tombstone marker) to the data file and updates
 * the bookkeeping used to build the column index.
 *
 * The statement order matters: the position is captured before serializing,
 * and previousRowStart is only advanced afterwards so the serializer receives
 * the delta from the previous row.
 *
 * @param unfiltered the atom to write
 * @throws IOException on serialization/write failure
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // Serialize with the offset from the previous row's start (delta encoding).
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        // Track the currently-open range deletion so index blocks can carry it.
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}
/**
 * Appends one atom (row or range tombstone marker) to the data file and updates
 * the bookkeeping used to build the column index.
 *
 * The statement order matters: the position is captured before serializing,
 * and previousRowStart is only advanced afterwards so the serializer receives
 * the delta from the previous row.
 *
 * @param unfiltered the atom to write
 * @throws IOException on serialization/write failure
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // Serialize with the offset from the previous row's start (delta encoding).
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        // Track the currently-open range deletion so index blocks can carry it.
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}
// Iterate every atom of the partition. (Fragment: the loop's closing brace lies
// outside this view.)
while (it.hasNext()) {
    Unfiltered un = it.next();
    // NOTE(review): unconditional cast assumes every atom is a row; a range
    // tombstone marker here would throw ClassCastException — confirm that
    // upstream filtering guarantees rows only.
    Clustering clt = (Clustering) un.clustering();
    Iterator<Cell> cells = partition.getRow(clt).cells().iterator();
    Iterator<ColumnDefinition> columns = partition.getRow(clt).columns().iterator();
/**
 * Appends one atom (row or range tombstone marker) to the data file and updates
 * the bookkeeping used to build the column index.
 *
 * The statement order matters: the position is captured before serializing,
 * and previousRowStart is only advanced afterwards so the serializer receives
 * the delta from the previous row.
 *
 * @param unfiltered the atom to write
 * @throws IOException on serialization/write failure
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // Serialize with the offset from the previous row's start (delta encoding).
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        // Track the currently-open range deletion so index blocks can carry it.
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}