/**
 * Fetches the next page of active shard rows for this queue/region and
 * swaps in an iterator backed by those rows.
 */
private void advance() {

    Clause queueNameClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_QUEUE_NAME, queueName);
    Clause regionClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_REGION, region);
    Clause activeClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_ACTIVE, 1);

    // Choose the shard-id lower bound: on the first page start just past the
    // last known shard id (or from zero when none is known); on subsequent
    // pages continue past the last row returned.
    final Clause shardIdClause;
    if (nextStart == 0L && lastShardId.isPresent()) {
        shardIdClause = QueryBuilder.gt( ShardSerializationImpl.COLUMN_SHARD_ID, lastShardId.get() );
    } else if (nextStart == 0L) {
        shardIdClause = QueryBuilder.gte( ShardSerializationImpl.COLUMN_SHARD_ID, 0L );
    } else {
        shardIdClause = QueryBuilder.gt( ShardSerializationImpl.COLUMN_SHARD_ID, nextStart );
    }

    Statement query = QueryBuilder.select().all()
            .from(ShardSerializationImpl.getTableName(shardType))
            .where(queueNameClause)
            .and(regionClause)
            .and(activeClause)
            .and(shardIdClause)
            .limit(PAGE_SIZE);

    List<Row> page = cassandraClient.getQueueMessageSession().execute(query).all();
    currentIterator = getIteratorFromRows(page);
}
selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions();
/**
 * Looks up all user-specific endpoint configurations for the given user id.
 */
@Override
public List<CassandraEndpointUserConfiguration> findByUserId(String userId) {
    LOG.debug("Searching for user specific configurations by user id {}", userId);

    Select.Where query = select().from(getColumnFamilyName())
            .where(eq(EP_USER_CONF_USER_ID_PROPERTY, userId));
    List<CassandraEndpointUserConfiguration> found = findListByStatement(query);

    // Full result dump only at trace level; debug gets just the count.
    if (LOG.isTraceEnabled()) {
        LOG.trace("[{}] Search result: {}.", userId, Arrays.toString(found.toArray()));
    } else {
        LOG.debug("[{}] Search result: {}.", userId, found.size());
    }
    return found;
}
/**
 * Reads all map entries matching the given keys in a single IN query and
 * hands the result set to the supplied builder.
 */
private <T> T getValuesCQL( final MapScope scope, final Collection<String> keys, final ResultsBuilderCQL<T> builder ) {

    // Serialize each requested key into its partition-key form.
    final List<ByteBuffer> partitionKeys = new ArrayList<>();
    for ( String key : keys ) {
        partitionKeys.add( getMapEntryPartitionKey( scope, key ) );
    }

    Statement query = QueryBuilder.select().all().from(MAP_ENTRIES_TABLE)
            .where(QueryBuilder.in("key", partitionKeys));

    return builder.buildResultsCQL( session.execute(query) );
}
/**
 * @return cql query statement to retrieve a workflow with its tasks from the "workflows" table
 */
public String getSelectWorkflowWithTasksStatement() {
    // Both the workflow id and the shard id are left as bind markers so the
    // statement can be prepared once and reused.
    Select.Where byWorkflow = QueryBuilder.select()
            .all()
            .from(keyspace, TABLE_WORKFLOWS)
            .where(eq(WORKFLOW_ID_KEY, bindMarker()));
    return byWorkflow.and(eq(SHARD_ID_KEY, bindMarker())).getQueryString();
}
selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); Select selectStmt = selectBuilder.from(table); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>();
ResultSet results = session.execute(select().from("notification")); for (Row row : results) { String id = row.getString("nf_id"); update("notification") .with(set("schema_id", String.valueOf(schemaId + idShift))) .where(eq("topic_id", ids[0])) .and(eq("nf_type", ids[1])) results = session.execute(select().from("ep_nfs")); for (Row row : results) { String id = row.getString("nf_id"); session.execute(batchStatement); session.close(); cluster.close();
/**
 * Removes the transfer-log entry identified by queue name, destination region
 * and message id, first verifying that the entry exists.
 *
 * @throws QakkaException if no matching transfer-log entry is found
 */
@Override
public void removeTransferLog( String queueName, String source, String dest, UUID messageId )
        throws QakkaException {

    // Check that the row exists before deleting it.
    Statement existsQuery = QueryBuilder.select().all().from(TABLE_TRANSFER_LOG)
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ))
            .and( QueryBuilder.eq( COLUMN_DEST_REGION, dest ))
            .and( QueryBuilder.eq( COLUMN_MESSAGE_ID, messageId ));
    ResultSet rs = cassandraClient.getApplicationSession().execute( existsQuery );

    if ( rs.getAvailableWithoutFetching() == 0 ) {
        throw new QakkaException( "Transfer log entry not found for queueName=" + queueName
                + " dest=" + dest + " messageId=" + messageId );
    }

    Statement deleteQuery = QueryBuilder.delete().from(TABLE_TRANSFER_LOG)
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ))
            .and( QueryBuilder.eq( COLUMN_DEST_REGION, dest ))
            .and( QueryBuilder.eq( COLUMN_MESSAGE_ID, messageId ));
    cassandraClient.getApplicationSession().execute( deleteQuery );
}
@Test(groups = "short")
public void dateHandlingTest() throws Exception {
    Date now = new Date();
    session().execute(insertInto("dateTest").value("t", now));

    // Round-trip the inserted date through a token()-based lookup.
    String cql = select().from("dateTest").where(eq(token("t"), fcall("token", now))).toString();
    List<Row> rows = session().execute(cql).all();

    assertEquals(1, rows.size());
    assertEquals(now, rows.get(0).getTimestamp("t"));
}
@Test(groups = "short")
public void selectInjectionTests() throws Exception {
    session().execute("CREATE TABLE foo ( k ascii PRIMARY KEY , i int, s ascii )");

    // The builder must produce a bind-marker query, never inline user input.
    String expectedCql = "SELECT * FROM foo WHERE k=?;";
    Statement selectStatement = select().all().from("foo").where(eq("k", bindMarker()));
    assertEquals(selectStatement.toString(), expectedCql);

    // Binding a hostile-looking value must be treated as data, not CQL.
    PreparedStatement prepared = session().prepare(selectStatement.toString());
    BoundStatement bound = prepared.bind();
    session().execute(bound.setString("k", "4 AND c=5"));
}
.execute( select("a", "b", "e", count("b"), max("e")) .from(table) .groupBy("a", "b") .execute( select("a", "b", "e", count("b"), max("e")) .from(table) .groupBy("a", "b", "c"))) .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24)); select("a", count("a")).distinct().from(table).where(eq("a", 1)).groupBy("a"))) .containsExactly(row(1, 1L)); .column("a") .column("b") .as("clustering1") .max("c") .column("a") .column("b") .max("c") session().execute(select().column("a").column("b").max("d").from(table).groupBy("a")); fail("Expecting IQE"); } catch (InvalidQueryException e) {
select = select().all().from("foo").where(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); assertEquals(select.toString(), query); select().all().from("foo").where().and(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); assertEquals(select.toString(), query); select = select().writeTime("a").ttl("a").from("foo").allowFiltering(); assertEquals(select.toString(), query); .distinct() .column("longName") .as("a") .ttl("longName") .as("ttla") .from("foo") .distinct() .column("longName") .as("a") .ttl("longName") .as("ttla") .from("foo") .all() select = select().countAll().from("foo"); assertEquals(select.toString(), query); select = select().fcall("intToBlob", column("b")).from("foo");
/**
 * Counts analyses belonging to the given account.
 *
 * @return the row count, or an empty {@code OptionalLong} when the count
 *         query yields no row
 */
@Override
public OptionalLong analysesCount(Identifier accountId) {
    Statement countQuery = QueryBuilder.select().countAll()
            .from(TABLE_NAME).where(eq("account_id", accountId.toString()));
    Row row = connector.session().execute(countQuery).one();
    return (row == null) ? OptionalLong.empty() : OptionalLong.of(row.getLong(0));
}
@Override public Result<TransferLog> getAllTransferLogs(PagingState pagingState, int fetchSize ) { Statement query = QueryBuilder.select().all().from(TABLE_TRANSFER_LOG); ResultSet rs = cassandraClient.getApplicationSession().execute( query ); final PagingState newPagingState = rs.getExecutionInfo().getPagingState(); int numReturned = rs.getAvailableWithoutFetching(); for ( int i=0; i<numReturned; i++ ) { Row row = rs.one(); TransferLog tlog = new TransferLog( row.getString( COLUMN_QUEUE_NAME ),
private ImmutableList<BuiltStatement> idempotentBuiltStatements() { return ImmutableList.<BuiltStatement>of( update("foo").with(set("v", 1)).where(eq("k", 1)), // set simple value update("foo").with(add("s", 1)).where(eq("k", 1)), // add to set update("foo").with(put("m", "a", 1)).where(eq("k", 1)), // put in map // select statements should be idempotent even with function calls select().countAll().from("foo").where(eq("k", 1)), select().ttl("v").from("foo").where(eq("k", 1)), select().writeTime("v").from("foo").where(eq("k", 1)), select().fcall("token", "k").from("foo").where(eq("k", 1))); }
/**
 * @return the names of all queues currently stored in the queues table
 */
@Override
public List<String> getListOfQueues() {
    logger.trace( "getListOfQueues " );

    Statement selectAllQueues = QueryBuilder.select().all().from( TABLE_QUEUES );
    return cassandraClient.getApplicationSession().execute( selectAllQueues )
            .all()
            .stream()
            .map( r -> r.getString( COLUMN_QUEUE_NAME ))
            .collect( Collectors.toList() );
}
@Test(groups = "unit") public void should_handle_per_partition_limit_clause() { assertThat(select().all().from("foo").perPartitionLimit(2).toString()) .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2;"); assertThat(select().all().from("foo").perPartitionLimit(bindMarker()).toString()) .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT ?;"); assertThat(select().all().from("foo").perPartitionLimit(bindMarker("limit")).toString()) .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT :limit;"); assertThat(select().all().from("foo").perPartitionLimit(2).limit(bindMarker()).toString()) .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2 LIMIT ?;"); .all() .all() .all() .all() "SELECT * FROM foo WHERE a=? AND b>? ORDER BY b DESC PER PARTITION LIMIT ? LIMIT 3 ALLOW FILTERING;"); try { select().distinct().all().from("foo").perPartitionLimit(3); fail("Should not allow DISTINCT + PER PARTITION LIMIT"); } catch (Exception e) { select().all().from("foo").perPartitionLimit(-1); fail("Should not allow negative limit"); } catch (IllegalArgumentException e) { select().all().from("foo").perPartitionLimit(1).perPartitionLimit(bindMarker());
private T updateLocked(T entity) { long version = (entity.getVersion() == null) ? 0L : entity.getVersion(); Assignments assigns = update(getColumnFamilyName()) .onlyIf(eq(OPT_LOCK, version)) .with(set(OPT_LOCK, version + 1)); CassandraEntityMapper<T> entityMapper = CassandraEntityMapper.getEntityMapperForClass( getColumnFamilyClass(), cassandraClient); if (!res.wasApplied()) { LOG.error("[{}] Can't update entity with version {}. Entity already changed!", getColumnFamilyClass(), version); + version + ". Entity already changed!"); } else { Select.Where where = select().from(getColumnFamilyName()).where(whereClauses[0]); if (whereClauses.length > 1) { for (int i = 1; i < whereClauses.length; i++) { where = where.and(whereClauses[i]);
boolean initializeCompactStorage() throws PermanentBackendException { try { final ResultSet versionResultSet = this.session.execute( select().column("release_version").from("system", "local") ); final String version = versionResultSet.one().getString(0); final int major = Integer.parseInt(version.substring(0, version.indexOf("."))); // starting with Cassandra 3 COMPACT STORAGE is deprecated and has no impact return (major < 3); } catch (NumberFormatException | NoHostAvailableException | QueryExecutionException | QueryValidationException e) { throw new PermanentBackendException("Error determining Cassandra version", e); } }