/**
 * Load up to {@code maxSize} MVCC log entries for the entity, beginning at
 * {@code version} and proceeding in natural column order.
 *
 * @param collectionScope the application scope to read from (required)
 * @param entityId        the entity whose log is read (required)
 * @param version         the version to start the slice at (required)
 * @param maxSize         maximum number of entries to return (must be > 0)
 * @throws RuntimeException wrapping any Cassandra {@link ConnectionException}
 */
@Override
public List<MvccLogEntry> load( final ApplicationScope collectionScope, final Id entityId, final UUID version,
                                final int maxSize ) {
    // Fail fast on bad arguments before touching Cassandra.
    Preconditions.checkNotNull( collectionScope, "collectionScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    final ColumnList<UUID> results;

    try {
        final Id applicationId = collectionScope.getApplication();
        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        // Slice [version .. end of row], capped at maxSize columns.
        results = keyspace.prepareQuery( CF_ENTITY_LOG )
                          .getKey( rowKey )
                          .withColumnRange( version, null, false, maxSize )
                          .execute()
                          .getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( results, entityId );
}
/**
 * Iterate the entity's version history starting at {@code version} and moving
 * through the row in natural column order, {@code fetchSize} columns per page.
 *
 * @param applicationScope the application scope to read from (required)
 * @param entityId         the entity whose history is read (required)
 * @param version          the version to start from (required)
 * @param fetchSize        page size for each Cassandra read (must be > 0)
 */
@Override
public Iterator<MvccEntity> loadDescendingHistory( final ApplicationScope applicationScope, final Id entityId,
                                                   final UUID version, final int fetchSize ) {

    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    // Fixed: the failure message previously said "max Size", naming the wrong parameter.
    Preconditions.checkArgument( fetchSize > 0, "fetchSize must be greater than 0" );

    final Id applicationId = applicationScope.getApplication();
    final Id ownerId = applicationId;
    final String collectionName = LegacyScopeUtils.getCollectionScopeNameFromEntityType( entityId.getType() );

    final CollectionPrefixedKey<Id> collectionPrefixedKey =
            new CollectionPrefixedKey<>( collectionName, ownerId, entityId );

    final ScopedRowKey<CollectionPrefixedKey<Id>> rowKey =
            ScopedRowKey.fromKey( applicationId, collectionPrefixedKey );

    // Slice [version .. end of row], paged fetchSize columns at a time.
    final RowQuery<ScopedRowKey<CollectionPrefixedKey<Id>>, UUID> query =
            keyspace.prepareQuery( columnFamily ).getKey( rowKey )
                    .withColumnRange( version, null, false, fetchSize );

    return new ColumnNameIterator( query, new MvccColumnParser( entityId, getEntitySerializer() ), false );
}
/**
 * Iterate the entity's version history up to {@code version}, walking the row
 * in reversed column order, {@code fetchSize} columns per page.
 *
 * @param applicationScope the application scope to read from (required)
 * @param entityId         the entity whose history is read (required)
 * @param version          the version to end the slice at (required)
 * @param fetchSize        page size for each Cassandra read (must be > 0)
 */
@Override
public Iterator<MvccEntity> loadAscendingHistory( final ApplicationScope applicationScope, final Id entityId,
                                                  final UUID version, final int fetchSize ) {

    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    // Fixed: the failure message previously said "max Size", naming the wrong parameter.
    Preconditions.checkArgument( fetchSize > 0, "fetchSize must be greater than 0" );

    final Id applicationId = applicationScope.getApplication();
    final Id ownerId = applicationId;
    final String collectionName = LegacyScopeUtils.getCollectionScopeNameFromEntityType( entityId.getType() );

    final CollectionPrefixedKey<Id> collectionPrefixedKey =
            new CollectionPrefixedKey<>( collectionName, ownerId, entityId );

    final ScopedRowKey<CollectionPrefixedKey<Id>> rowKey =
            ScopedRowKey.fromKey( applicationId, collectionPrefixedKey );

    // Reversed slice ending at 'version' — mirror image of loadDescendingHistory.
    final RowQuery<ScopedRowKey<CollectionPrefixedKey<Id>>, UUID> query =
            keyspace.prepareQuery( columnFamily ).getKey( rowKey )
                    .withColumnRange( null, version, true, fetchSize );

    return new ColumnNameIterator( query, new MvccColumnParser( entityId, getEntitySerializer() ), false );
}
/**
 * Load up to {@code maxSize} MVCC log entries in reversed column order,
 * starting at {@code minVersion}.
 *
 * @param applicationScope the application scope to read from (required)
 * @param entityId         the entity whose log is read (required)
 * @param minVersion       slice start; may be null to start from the row boundary
 * @param maxSize          maximum number of entries to return (must be > 0)
 * @throws RuntimeException wrapping any Cassandra {@link ConnectionException}
 */
@Override
public List<MvccLogEntry> loadReversed( final ApplicationScope applicationScope, final Id entityId,
                                        final UUID minVersion, final int maxSize ) {
    // Added for consistency with load(), which validates its arguments up front.
    // minVersion is deliberately NOT null-checked: a null range start is a valid
    // "from the row boundary" slice in Astyanax.
    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkArgument( maxSize > 0, "maxSize must be greater than 0" );

    ColumnList<UUID> columns;
    try {
        final Id applicationId = applicationScope.getApplication();
        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        // Reversed slice starting at minVersion, capped at maxSize columns.
        columns = keyspace.prepareQuery( CF_ENTITY_LOG ).getKey( rowKey )
                          .withColumnRange( minVersion, null, true, maxSize ).execute().getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }
    return parseResults( columns, entityId );
}
/**
 * Page through the shard metadata columns for the given edge metadata.
 *
 * @param scope    the application scope (validated)
 * @param start    optional shard to resume seeking from
 * @param metaData the directed-edge metadata identifying the row (required)
 */
@Override
public Iterator<Shard> getShardMetaData( final ApplicationScope scope, final Optional<Shard> start,
                                         final DirectedEdgeMeta metaData ) {
    ValidationUtils.validateApplicationScope( scope );
    // Fixed: null-check BEFORE validation. The original checked after
    // validateDirectedEdgeMeta( metaData ) had already dereferenced the argument,
    // which made the null check unreachable for a null input.
    Preconditions.checkNotNull( metaData, "metadata must be present" );
    GraphValidation.validateDirectedEdgeMeta( metaData );

    final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );

    // If a start shard is present, begin seeking from its index.
    if ( start.isPresent() ) {
        final Shard shard = start.get();
        GraphValidation.valiateShard( shard );
        rangeBuilder.setStart( shard.getShardIndex() );
    }

    final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope.getApplication(), metaData );

    final RowQuery<ScopedRowKey<DirectedEdgeMeta>, Long> query =
            keyspace.prepareQuery( EDGE_SHARDS ).setConsistencyLevel( cassandraConfig.getReadCL() ).getKey( rowKey )
                    .autoPaginate( true ).withColumnRange( rangeBuilder.build() );

    return new ColumnNameIterator<>( query, COLUMN_PARSER, false );
}
/** * Get the edge types from the search criteria. * * @param scope The org scope * @param search The edge type search info * @param cf The column family to execute on */ private Iterator<String> getEdgeTypes( final ApplicationScope scope, final SearchEdgeType search, final MultiTenantColumnFamily<ScopedRowKey<Id>, String> cf ) { ValidationUtils.validateApplicationScope( scope ); GraphValidation.validateSearchEdgeType( search ); final ScopedRowKey< Id> sourceKey = new ScopedRowKey<>( scope.getApplication(), search.getNode() ); //resume from the last if specified. Also set the range final RangeBuilder rangeBuilder = createRange( search ); RowQuery<ScopedRowKey<Id>, String> query = keyspace.prepareQuery( cf ).getKey( sourceKey ).autoPaginate( true ) .withColumnRange( rangeBuilder.build() ); return new ColumnNameIterator<>( query, PARSER, search.getLast().isPresent() ); }
/**
 * Read the id-type names for a (node, edge type) pair from the given column family.
 *
 * @param scope  the application scope to query
 * @param search the id-type search criteria (node, edge type, optional resume point)
 * @param cf     the column family to read from
 */
public Iterator<String> getIdTypes( final ApplicationScope scope, final SearchIdType search,
                                    final MultiTenantColumnFamily<ScopedRowKey<EdgeIdTypeKey>, String> cf ) {
    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateSearchEdgeIdType( search );

    // Row is keyed by the (node, edge type) composite.
    final EdgeIdTypeKey typeKey = new EdgeIdTypeKey( search.getNode(), search.getEdgeType() );
    final ScopedRowKey<EdgeIdTypeKey> rowKey = new ScopedRowKey<>( scope.getApplication(), typeKey );

    // The range carries both the page size and any resume-from-last cursor.
    final RangeBuilder range = createRange( search );

    final RowQuery<ScopedRowKey<EdgeIdTypeKey>, String> rowQuery =
            keyspace.prepareQuery( cf ).getKey( rowKey ).autoPaginate( true ).withColumnRange( range.build() );

    return new ColumnNameIterator<>( rowQuery, PARSER, search.getLast().isPresent() );
}
/**
 * Build an iterator over the Long column names of the given row,
 * in forward or reversed order, fetching up to 720 columns per page.
 *
 * @param rowKey   the row to read
 * @param reversed whether to walk the row in reversed column order
 */
private static ColumnNameIterator<Long, Long> createIterator( final String rowKey, final boolean reversed ) {

    // Only the column names matter; the parser discards values.
    final ColumnParser<Long, Long> nameParser = new ColumnParser<Long, Long>() {
        @Override
        public Long parseColumn( final Column<Long> column ) {
            return column.getName();
        }
    };

    final RangeBuilder range = new RangeBuilder().setLimit( 720 ).setReversed( reversed );

    final RowQuery<String, Long> rowQuery =
            keyspace.prepareQuery( COLUMN_FAMILY ).getKey( rowKey ).withColumnRange( range.build() );

    return new ColumnNameIterator<>( rowQuery, nameParser, false );
}
}
// NOTE(review): reads the same row twice — once with the forward range, once with the
// reverse range. rowKey1, forwardRange and reverseRange are defined outside this excerpt;
// verify their bounds there. Neither result is captured here — presumably consumed or
// asserted by surrounding code not visible in this chunk.
keyspace.prepareQuery( COLUMN_FAMILY ).getKey( rowKey1 ).withColumnRange( forwardRange.build() );
keyspace.prepareQuery( COLUMN_FAMILY ).getKey( rowKey1 ).withColumnRange( reverseRange.build() );
/**
 * Assert the column count of a row: 0 when the row was deleted, 26 (a..z) otherwise.
 *
 * @param rowKey     the row to count
 * @param rowDeleted whether the row is expected to have been deleted
 */
private void getColumnCountForRowKey(String rowKey, boolean rowDeleted) throws Exception {
    // Limit of -1 means "no limit": count every column in the a..z range.
    Integer count = keyspace
            .prepareQuery(CF_COLUMN_RANGE_TEST)
            .getKey(rowKey)
            .withColumnRange("a", "z", false, -1)
            .getCount()
            .execute().getResult();

    int expectedCount = rowDeleted ? 0 : 26;
    // Fixed: assertEquals reports expected vs. actual on failure; the original
    // assertTrue(count.intValue() == expectedCount) failed with no diagnostic.
    Assert.assertEquals(expectedCount, count.intValue());
}
}
/**
 * Read the a..z column range of a row and verify its contents: empty when the
 * row was deleted, otherwise columns "a".."z" with values 1..26 in order.
 *
 * @param rowKey     the row to read
 * @param rowDeleted whether the row is expected to have been deleted
 */
private void readColumnRangeForRowKey(String rowKey, boolean rowDeleted) throws Exception {
    // Limit of -1 means "no limit": fetch every column in the a..z range.
    ColumnList<String> columns = keyspace
            .prepareQuery(CF_COLUMN_RANGE_TEST)
            .getKey(rowKey)
            .withColumnRange("a", "z", false, -1)
            .execute().getResult();

    if (rowDeleted) {
        Assert.assertTrue(columns.isEmpty());
        return;
    }

    Assert.assertFalse(columns.isEmpty());
    char ch = 'a';
    for (Column<String> c : columns) {
        Assert.assertEquals(String.valueOf(ch), c.getName());
        // Fixed: assertEquals for a diagnostic failure message
        // (was assertTrue on an == comparison).
        Assert.assertEquals(ch - 'a' + 1, c.getIntegerValue());
        ch++;
    }
    // Fixed: the original never verified the slice covered the whole range —
    // a truncated result (e.g. only "a".."m") would have passed.
    Assert.assertEquals('z' + 1, ch);
}
/**
 * Load all dedup segment metadata for a queue as a segment-id -> value map.
 *
 * @param queue the queue whose metadata row is read
 */
@Override
public Map<UUID, String> loadSegments(String queue) {
    // Page through the metadata row 100 columns at a time.
    Iterator<Column<UUID>> columns = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(queue)
                    .withColumnRange(new RangeBuilder().setLimit(100).build())
                    .autoPaginate(true));

    Map<UUID, String> segments = Maps.newHashMap();
    while (columns.hasNext()) {
        Column<UUID> segment = columns.next();
        segments.put(segment.getName(), segment.getStringValue());
    }
    return segments;
}
/**
 * Load all dedup segment metadata for a queue, keyed by segment UUID.
 *
 * @param queue the queue whose metadata row is read
 */
@Override
public Map<UUID, String> loadSegments(String queue) {
    Map<UUID, String> result = Maps.newHashMap();

    // executePaginated flattens the 100-column pages into a single iterator.
    Iterator<Column<UUID>> it = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(queue)
                    .withColumnRange(new RangeBuilder().setLimit(100).build())
                    .autoPaginate(true));

    for (; it.hasNext(); ) {
        Column<UUID> next = it.next();
        result.put(next.getName(), next.getStringValue());
    }

    return result;
}
@Nullable @Override public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) { // Use a column range with a "start" to skip past tombstones. ColumnList<ByteBuffer> columns = execute(_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM) .getKey(dataId) .withColumnRange(new RangeBuilder() .setStart(Objects.firstNonNull(from, EMPTY_BUFFER)) .setLimit(1) .build())); return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null; }
@Nullable @Override public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) { // Use a column range with a "start" to skip past tombstones. ColumnList<ByteBuffer> columns = execute(_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM) .getKey(dataId) .withColumnRange(new RangeBuilder() .setStart(Objects.firstNonNull(from, EMPTY_BUFFER)) .setLimit(1) .build())); return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null; }
/**
 * Page through a row's columns starting at "a" and verify each page: pages are
 * no larger than {@code pageSize} and column names/values run sequentially from 'a'.
 *
 * @param rowKey     the row to paginate
 * @param rowDeleted whether the row is expected to have been deleted
 * @param pageSize   the CQL fetch size per page
 */
private void paginateColumnsForRowKey(String rowKey, boolean rowDeleted, int pageSize) throws Exception {
    ColumnList<String> columns;
    RowQuery<String, String> query = keyspace
            .prepareQuery(TestUtils.CF_COLUMN_RANGE_TEST)
            .getKey(rowKey)
            .autoPaginate(true)
            .withColumnRange(
                    new CqlRangeBuilder<String>().setStart("a")
                            .setFetchSize(pageSize).build());

    // count tracks the expected 1-based value of the next column seen.
    int count = 1;
    while (!(columns = query.execute().getResult()).isEmpty()) {
        Assert.assertTrue(columns.size() <= pageSize);
        for (Column<String> col : columns) {
            int value = col.getName().charAt(0) - 'a' + 1;
            Assert.assertEquals(count, value);
            count++;
        }
    }

    if (rowDeleted) {
        // Deleted row: no columns may come back at all.
        Assert.assertTrue(count == 1);
    } else {
        // Fixed: the original asserted nothing for a live row, so an empty
        // result set passed vacuously. Require at least one column.
        Assert.assertTrue(count > 1);
    }
}
/**
 * Exercise the three column-range styles against row "A" of CF_STANDARD1:
 * a bounded slice, an open-ended forward slice, and a reversed slice.
 */
@Test
public void testColumnRangeSlice() throws ConnectionException {
    // Bounded slice [a, b] with limit 5 -> exactly the two columns "a" and "b".
    OperationResult<ColumnList<String>> bounded = keyspace
            .prepareQuery(CF_STANDARD1)
            .getKey("A")
            .withColumnRange(
                    new RangeBuilder().setStart("a").setEnd("b")
                            .setLimit(5).build()).execute();
    Assert.assertEquals(2, bounded.getResult().size());

    // Open-ended slice from "a" with limit 5 -> five columns, starting at "a".
    OperationResult<ColumnList<String>> forward = keyspace
            .prepareQuery(CF_STANDARD1).getKey("A")
            .withColumnRange("a", null, false, 5).execute();
    Assert.assertEquals(5, forward.getResult().size());
    Assert.assertEquals("a", forward.getResult().getColumnByIndex(0).getName());

    // Reversed unbounded slice with limit 5 -> five columns, "z" first.
    ByteBuffer emptyBuffer = ByteBuffer.wrap(new byte[0]);
    OperationResult<ColumnList<String>> reversed = keyspace
            .prepareQuery(CF_STANDARD1).getKey("A")
            .withColumnRange(emptyBuffer, emptyBuffer, true, 5).execute();
    Assert.assertEquals(5, reversed.getResult().size());
    Assert.assertEquals("z", reversed.getResult().getColumnByIndex(0).getName());
}
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<UUID> columns = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }