// Build the CQL scan statement: drop the trailing character of the prepared text
// (presumably a ';' or ','), then start a token-range predicate on the partition key.
scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
scanStmt.append(" WHERE ");
scanStmt.append(QueryBuilder.token(YCSB_KEY));
scanStmt.append(" >= ");
// NOTE(review): no opening quote after "token(" — this assumes a non-text value is appended
// next. A quoted variant ("token('") also exists in this codebase; confirm which form
// matches the CQL type of the key column.
scanStmt.append("token(");
/**
 * Round-trip test for {@code java.util.Date} values: a date inserted into {@code dateTest}
 * must be retrievable via a token()-based equality predicate and come back unchanged
 * through {@code getTimestamp}.
 *
 * @throws Exception if the session execution fails
 */
@Test(groups = "short")
public void dateHandlingTest() throws Exception {
    Date d = new Date();
    session().execute(insertInto("dateTest").value("t", d));
    String query = select().from("dateTest").where(eq(token("t"), fcall("token", d))).toString();
    List<Row> rows = session().execute(query).all();
    // TestNG's assertEquals signature is (actual, expected) — keep the ordering
    // consistent with the other assertions in this suite.
    assertEquals(rows.size(), 1);
    Row r1 = rows.get(0);
    assertEquals(r1.getTimestamp("t"), d);
}
// Deliberately injection-shaped column name: the builder must quote/escape
// "k)>0 OR token(k" rather than let it break out of the generated token() call.
delete = delete().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42)));
assertEquals(delete.toString(), query);
// Deliberately injection-shaped column name: the builder must quote/escape
// "k)>0 OR token(k" rather than let it break out of the generated token() call.
select = select().all().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42)));
assertEquals(select.toString(), query);
// Map a comparison-operator string onto the corresponding token() clause over the
// partition-key columns. names.toArray(new String[0]) is the idiomatic form — on
// modern JVMs it is also faster than pre-sizing with names.size().
case "=":
    clause = QueryBuilder.eq(QueryBuilder.token(names.toArray(new String[0])), value);
    break;
case ">":
    clause = QueryBuilder.gt(QueryBuilder.token(names.toArray(new String[0])), value);
    break;
case ">=":
    clause = QueryBuilder.gte(QueryBuilder.token(names.toArray(new String[0])), value);
    break;
case "<":
    clause = QueryBuilder.lt(QueryBuilder.token(names.toArray(new String[0])), value);
    break;
case "<=":
    clause = QueryBuilder.lte(QueryBuilder.token(names.toArray(new String[0])), value);
    break;
default:
// Accumulates one CQL string per unwrapped token sub-range.
ArrayList<String> cqlList = new ArrayList<>();
// NOTE(review): the loop body continues beyond this snippet — incomplete here.
for (TokenRange subrange : tokenRange.unwrap()) {
    // QueryBuilder.token(...) renders a "token(col1,col2,...)" term over the partition-key columns.
    String token = QueryBuilder.token(partitionKeys.toArray(new String[]{}));
// Build the CQL scan statement: drop the trailing character of the prepared text
// (presumably a ';' or ','), then start a token-range predicate on the partition key.
scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
scanStmt.append(" WHERE ");
scanStmt.append(QueryBuilder.token(YCSB_KEY));
scanStmt.append(" >= ");
// Opens a single-quoted token argument — assumes the key is a text/varchar column whose
// value (and closing "')") is appended afterwards. TODO(review): confirm against the schema.
scanStmt.append("token('");
// Accumulates one CQL string per unwrapped token sub-range.
ArrayList<String> cqlList = new ArrayList<>();
// NOTE(review): the loop body continues beyond this snippet — incomplete here.
for (TokenRange subrange : tokenRange.unwrap()) {
    // QueryBuilder.token(...) renders a "token(col1,col2,...)" term over the partition-key columns.
    String token = QueryBuilder.token(partitionKeys.toArray(new String[]{}));
/** * Scans a range of keys and returns an iterator containing each row's columns as an iterable. */ private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot scan rows which loop from maximum- to minimum-token"); TableDDL tableDDL = placement.getDeltaTableDDL(); Statement statement = selectFrom(tableDDL) .where(gt(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), table != null ? table : "multiple tables"); }
/**
 * Round-trip test for {@code java.util.Date} values: a date inserted into {@code dateTest}
 * must be retrievable via a token()-based equality predicate and come back unchanged
 * through {@code getTimestamp}.
 *
 * @throws Exception if the session execution fails
 */
@Test(groups = "short")
public void dateHandlingTest() throws Exception {
    Date d = new Date();
    session().execute(insertInto("dateTest").value("t", d));
    String query = select().from("dateTest").where(eq(token("t"), fcall("token", d))).toString();
    List<Row> rows = session().execute(query).all();
    // TestNG's assertEquals signature is (actual, expected) — keep the ordering
    // consistent with the other assertions in this suite.
    assertEquals(rows.size(), 1);
    Row r1 = rows.get(0);
    assertEquals(r1.getTimestamp("t"), d);
}
/** * Scans a range of keys and returns an iterator containing each row's columns as an iterable. */ private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot scan rows which loop from maximum- to minimum-token"); BlockedDeltaTableDDL tableDDL = placement.getBlockedDeltaTableDDL(); Statement statement = selectDeltaFrom(tableDDL) .where(gt(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), table != null ? table : "multiple tables"); }
/** * Scans a range of keys and returns an iterator containing each row's columns as an iterable. */ private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot scan rows which loop from maximum- to minimum-token"); BlockedDeltaTableDDL tableDDL = placement.getBlockedDeltaTableDDL(); Statement statement = selectDeltaFrom(tableDDL) .where(gt(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), table != null ? table : "multiple tables"); }
/** * Scans a range of keys and returns an iterator containing each row's columns as an iterable. */ private Iterator<Iterable<Row>> rowScan(DeltaPlacement placement, @Nullable AstyanaxTable table, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot scan rows which loop from maximum- to minimum-token"); TableDDL tableDDL = placement.getDeltaTableDDL(); Statement statement = selectFrom(tableDDL) .where(gt(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), table != null ? table : "multiple tables"); }
private Iterator<Iterable<Row>> migrationScan(DeltaPlacement placement, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot migrate rows which loop from maximum- to minimum-token"); TableDDL tableDDL = placement.getDeltaTableDDL(); // Our query needs to be inclusive on both sides so that we ensure that we get all records in the event of a re-split Statement statement = selectFrom(tableDDL) .where(gte(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan (for migration) token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), "multiple tables"); }
private Iterator<Iterable<Row>> migrationScan(DeltaPlacement placement, ByteBufferRange keyRange, ReadConsistency consistency) { ByteBuffer startToken = keyRange.getStart(); ByteBuffer endToken = keyRange.getEnd(); // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap // around which is absolutely *not* what we want. checkArgument(AstyanaxStorage.compareKeys(startToken, endToken) < 0, "Cannot migrate rows which loop from maximum- to minimum-token"); TableDDL tableDDL = placement.getDeltaTableDDL(); // Our query needs to be inclusive on both sides so that we ensure that we get all records in the event of a re-split Statement statement = selectFrom(tableDDL) .where(gte(token(tableDDL.getRowKeyColumnName()), startToken)) .and(lte(token(tableDDL.getRowKeyColumnName()), endToken)) .setConsistencyLevel(SorConsistencies.toCql(consistency)); return deltaQueryAsync(placement, statement, false, "Failed to scan (for migration) token range [%s, %s] for %s", ByteBufferUtil.bytesToHex(startToken), ByteBufferUtil.bytesToHex(endToken), "multiple tables"); }
// Deliberately injection-shaped column name: the builder must quote/escape
// "k)>0 OR token(k" rather than let it break out of the generated token() call.
delete = delete().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42)));
assertEquals(delete.toString(), query);
// Deliberately injection-shaped column name: the builder must quote/escape
// "k)>0 OR token(k" rather than let it break out of the generated token() call.
select = select().all().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42)));
assertEquals(select.toString(), query);