private V readValueCQL(CacheScope scope, K key, TypeReference typeRef) {

    Preconditions.checkNotNull(scope, "scope is required");
    Preconditions.checkNotNull(key, "key is required");

    final String rowKeyString = scope.getApplication().getUuid().toString();
    final int bucket = BUCKET_LOCATOR.getCurrentBucket(rowKeyString);

    // Determine the column name from the key's string representation
    final String columnName = key.toString();

    final Clause inKey = QueryBuilder.eq("key", getPartitionKey(scope, rowKeyString, bucket));
    final Clause inColumn =
        QueryBuilder.eq("column1", DataType.text().serialize(columnName, ProtocolVersion.NEWEST_SUPPORTED));

    final Statement statement = QueryBuilder.select().all().from(SCOPED_CACHE_TABLE)
        .where(inKey)
        .and(inColumn)
        .setConsistencyLevel(cassandraConfig.getDataStaxReadCl());

    final ResultSet resultSet = session.execute(statement);
    final com.datastax.driver.core.Row row = resultSet.one();

    if (row == null) {
        if (logger.isDebugEnabled()) {
            logger.debug("Cache value not found for key {}", key);
        }
        return null;
    }

    try {
        return MAPPER.readValue(row.getBytes("value").array(), typeRef);
    } catch (IOException ioe) {
        logger.error("Unable to read cached value", ioe);
        throw new RuntimeException("Unable to read cached value", ioe);
    }
}
@Override
public Map<String, Object> getTokenInfo(UUID tokenUUID) {

    Preconditions.checkNotNull(tokenUUID, "token UUID is required");

    List<ByteBuffer> tokenProperties = new ArrayList<>();
    TOKEN_PROPERTIES.forEach(prop ->
        tokenProperties.add(DataType.serializeValue(prop, ProtocolVersion.NEWEST_SUPPORTED)));

    final ByteBuffer key = DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED);

    final Clause inKey = QueryBuilder.eq("key", key);
    final Clause inColumn = QueryBuilder.in("column1", tokenProperties);

    final Statement statement = QueryBuilder.select().all().from(TOKENS_TABLE)
        .where(inKey)
        .and(inColumn)
        .setConsistencyLevel(cassandraConfig.getDataStaxReadCl());

    final ResultSet resultSet = session.execute(statement);
    final List<Row> rows = resultSet.all();

    Map<String, Object> tokenInfo = new HashMap<>();
    rows.forEach(row -> {

        final String name = (String) DataType.text()
            .deserialize(row.getBytes("column1"), ProtocolVersion.NEWEST_SUPPORTED);
        final Object value = deserializeColumnValue(name, row.getBytes("value"));

        if (value == null) {
            throw new RuntimeException("error deserializing token info for property: " + name);
        }

        tokenInfo.put(name, value);
    });

    logger.trace("getTokenInfo, info: {}", tokenInfo);

    return tokenInfo;
}
.setConsistencyLevel(consistencyLevel);
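// For context: the bare ".setConsistencyLevel(consistencyLevel);" line above is the tail of a
// DataStax driver 3.x statement-builder chain, as the surrounding snippets show. Below is a
// minimal, hypothetical sketch of that pattern; "my_table", "key", and the class and method
// names are placeholders, not taken from any snippet here.
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

final class ConsistencyLevelExample {
    // Build a SELECT on a placeholder table, then pin its consistency level before executing.
    static ResultSet readWithConsistency(Session session, Object partitionKey,
                                         ConsistencyLevel consistencyLevel) {
        Statement statement = QueryBuilder.select().all()
            .from("my_table")
            .where(QueryBuilder.eq("key", partitionKey))
            .setConsistencyLevel(consistencyLevel); // the fragment shown above
        return session.execute(statement);
    }
}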
private Record read(Key key, ByteBuffer rowKey, ReadConsistency consistency, DeltaPlacement placement) {
    checkNotNull(key, "key");
    checkNotNull(consistency, "consistency");

    TableDDL tableDDL = placement.getDeltaTableDDL();

    Statement statement = selectFrom(tableDDL)
        .where(eq(tableDDL.getRowKeyColumnName(), rowKey))
        .setConsistencyLevel(SorConsistencies.toCql(consistency));

    // Track metrics
    _randomReadMeter.mark();

    Iterator<Iterable<Row>> groupedRows = deltaQuery(placement, statement, true, "Failed to read record %s", key);

    Iterable<Row> rows;
    if (groupedRows.hasNext()) {
        rows = groupedRows.next();
    } else {
        rows = ImmutableList.of();
    }

    // Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
    return newRecordFromCql(key, rows);
}
/**
 * Scans a range of keys and returns an iterator containing each row's columns as an iterable.
 */
private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table,
                                        ByteBufferRange keyRange, ReadConsistency consistency) {
    ByteBuffer startToken = keyRange.getStart();
    ByteBuffer endToken = keyRange.getEnd();

    // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap
    // around which is absolutely *not* what we want.
    checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0,
        "Cannot scan rows which loop from maximum- to minimum-token");

    TableDDL tableDDL = placement.getDeltaTableDDL();

    Statement statement = selectFrom(tableDDL)
        .where(gt(token(tableDDL.getRowKeyColumnName()), startToken))
        .and(lte(token(tableDDL.getRowKeyColumnName()), endToken))
        .setConsistencyLevel(SorConsistencies.toCql(consistency));

    return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s",
        ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken),
        table != null ? table : "multiple tables");
}
private Record read(Key key, ByteBuffer rowKey, ReadConsistency consistency, DeltaPlacement placement) {
    checkNotNull(key, "key");
    checkNotNull(consistency, "consistency");

    BlockedDeltaTableDDL tableDDL = placement.getBlockedDeltaTableDDL();

    Statement statement = selectDeltaFrom(tableDDL)
        .where(eq(tableDDL.getRowKeyColumnName(), rowKey))
        .setConsistencyLevel(SorConsistencies.toCql(consistency));

    // Track metrics
    _randomReadMeter.mark();

    Iterator<Iterable<Row>> groupedRows = deltaQuery(placement, statement, true, "Failed to read record %s", key);

    Iterable<Row> rows;
    if (groupedRows.hasNext()) {
        rows = groupedRows.next();
    } else {
        rows = ImmutableList.of();
    }

    // Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
    return newRecordFromCql(key, rows, placement, ByteBufferUtil.bytesToHex(rowKey));
}
private Iterator<Iterable<Row>> migrationScan(DeltaPlacement placement, ByteBufferRange keyRange,
                                              ReadConsistency consistency) {
    ByteBuffer startToken = keyRange.getStart();
    ByteBuffer endToken = keyRange.getEnd();

    // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap
    // around which is absolutely *not* what we want.
    checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0,
        "Cannot migrate rows which loop from maximum- to minimum-token");

    TableDDL tableDDL = placement.getDeltaTableDDL();

    // Our query needs to be inclusive on both sides so that we ensure that we get all records in the event of a re-split
    Statement statement = selectFrom(tableDDL)
        .where(gte(token(tableDDL.getRowKeyColumnName()), startToken))
        .and(lte(token(tableDDL.getRowKeyColumnName()), endToken))
        .setConsistencyLevel(SorConsistencies.toCql(consistency));

    return deltaQueryAsync(placement, statement, false, "Failed to scan (for migration) token range [%s, %s] for %s",
        ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), "multiple tables");
}
/**
 * Scans a range of keys and returns an iterator containing each row's columns as an iterable.
 */
private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table,
                                        ByteBufferRange keyRange, ReadConsistency consistency) {
    ByteBuffer startToken = keyRange.getStart();
    ByteBuffer endToken = keyRange.getEnd();

    // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap
    // around which is absolutely *not* what we want.
    checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0,
        "Cannot scan rows which loop from maximum- to minimum-token");

    BlockedDeltaTableDDL tableDDL = placement.getBlockedDeltaTableDDL();

    Statement statement = selectDeltaFrom(tableDDL)
        .where(gt(token(tableDDL.getRowKeyColumnName()), startToken))
        .and(lte(token(tableDDL.getRowKeyColumnName()), endToken))
        .setConsistencyLevel(SorConsistencies.toCql(consistency));

    return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s",
        ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken),
        table != null ? table : "multiple tables");
}
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getSubscription", absolute = true) @Override public OwnedSubscription getSubscription(String subscription) { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn(), subscriptionColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .and(eq(subscriptionNameColumn(), subscription)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)); Row row = resultSet.one(); if (row == null) { return null; } return rowToOwnedSubscription(row); }
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getSubscription", absolute = true) @Override public OwnedSubscription getSubscription(String subscription) { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn(), subscriptionColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .and(eq(subscriptionNameColumn(), subscription)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)); Row row = resultSet.one(); if (row == null) { return null; } return rowToOwnedSubscription(row); }
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptionNames", absolute = true) @Override public Iterable<String> getAllSubscriptionNames() { return () -> { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) .setFetchSize(5000)); return StreamSupport.stream(resultSet.spliterator(), false).map(row -> row.getString(0)).iterator(); }; }
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptions", absolute = true) @Override public Iterable<OwnedSubscription> getAllSubscriptions() { return () -> { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn(), subscriptionColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) .setFetchSize(200)); return StreamSupport.stream(resultSet.spliterator(), false).map(this::rowToOwnedSubscription).iterator(); }; }
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptionNames", absolute = true) @Override public Iterable<String> getAllSubscriptionNames() { return () -> { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) .setFetchSize(5000)); return StreamSupport.stream(resultSet.spliterator(), false).map(row -> row.getString(0)).iterator(); }; }
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptions", absolute = true) @Override public Iterable<OwnedSubscription> getAllSubscriptions() { return () -> { ResultSet resultSet = _keyspace.getCqlSession().execute( select(subscriptionNameColumn(), subscriptionColumn()) .from(CF_NAME) .where(eq(rowkeyColumn(), ROW_KEY)) .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) .setFetchSize(200)); return StreamSupport.stream(resultSet.spliterator(), false).map(this::rowToOwnedSubscription).iterator(); }; }
private ByteBuffer getValueCQL(MapScope scope, String key, final ConsistencyLevel consistencyLevel) {

    Clause in = QueryBuilder.in("key", getMapEntryPartitionKey(scope, key));
    Statement statement = QueryBuilder.select().all().from(MAP_ENTRIES_TABLE)
        .where(in)
        .setConsistencyLevel(consistencyLevel);

    ResultSet resultSet = session.execute(statement);
    com.datastax.driver.core.Row row = resultSet.one();

    return row != null ? row.getBytes("value") : null;
}