.withColumnRange(rangeBuilder.build());
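The fragment above omits both the builder setup and the enclosing query. A minimal sketch of typical surrounding code, assuming a hypothetical column family CF_EXAMPLE and row key; only RangeBuilder calls that appear elsewhere in this section are used:

// Hypothetical context for the fragment above (CF_EXAMPLE and rowKey are
// placeholders): build a column range, then read it from a single row.
ByteBufferRange range = new RangeBuilder()
        .setReversed(false)   // ascending column order
        .setLimit(100)        // return at most 100 columns
        .build();

ColumnList<String> columns = keyspace.prepareQuery(CF_EXAMPLE)
        .getKey(rowKey)
        .withColumnRange(range)
        .execute()
        .getResult();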
.withColumnRange(maxVersion, null, false, 1).execute().getResult().iterator();
.withColumnRange(maxVersion, null, false, 1).execute().getResult();
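Both fragments above use the four-argument withColumnRange(start, end, reversed, limit) overload to fetch at most one column starting at maxVersion. A hedged sketch of the kind of query they sit in; CF_VERSIONS, rowKey, and the Long column type are assumptions:

// Hypothetical surrounding query for the fragments above. A null end column
// leaves the range open-ended, and the limit of 1 means the result holds at
// most the single column at or after maxVersion.
ColumnList<Long> versions = keyspace.prepareQuery(CF_VERSIONS)
        .getKey(rowKey)
        .withColumnRange(maxVersion, null, false, 1)
        .execute()
        .getResult();

if (!versions.isEmpty()) {
    Column<Long> match = versions.iterator().next();
}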
rq.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit());
@Override
public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException {
    // This query can only be done when a byte-ordering partitioner is used, because
    // Cassandra operates on tokens internally, which means that even a contiguous
    // range of keys (e.g. a time slice) could produce a disjoint set of tokens under
    // a random partitioner, returning ambiguous results to the user.
    Partitioner partitioner = storeManager.getPartitioner();
    if (partitioner != Partitioner.BYTEORDER)
        throw new PermanentBackendException("getKeys(KeyRangeQuery) can only be used with a byte-ordering partitioner.");

    ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer();

    RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily)
            .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
            .withRetryPolicy(retryPolicy.duplicate())
            .getKeyRange(start, end, null, null, Integer.MAX_VALUE);

    // Astyanax is bad at the builder pattern :(
    rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(),
            false, query.getLimit());

    // Omit the query's keyEnd from the result, if present.
    final Rows<ByteBuffer, ByteBuffer> r;
    try {
        r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }

    Iterator<Row<ByteBuffer, ByteBuffer>> i =
            Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer()));

    return new RowIterator(i, query);
}
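The filter step above relies on a KeySkipPredicate that is not shown in this section. A minimal sketch of what it may look like, assuming it is a plain Guava Predicate (the real class may differ):

// Hedged sketch of KeySkipPredicate (an assumption; the actual class is not
// shown here): drops the row whose key equals the query's keyEnd, since
// getKeyRange treats the end key as inclusive.
private static class KeySkipPredicate implements Predicate<Row<ByteBuffer, ByteBuffer>> {
    private final ByteBuffer skip;

    KeySkipPredicate(ByteBuffer skip) {
        this.skip = skip;
    }

    @Override
    public boolean apply(Row<ByteBuffer, ByteBuffer> row) {
        return row != null && !row.getKey().equals(skip);
    }
}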
private void testColumnCountMultipleRowKeysAndColumnRange(boolean rowDeleted) throws Exception {
    Collection<String> rowKeys = getRandomRowKeys();

    // Pick a random start column between 'a' and 'z'.
    int rand = new Random().nextInt(26);
    char randCH = (char) ('a' + rand);
    String startCol = String.valueOf(randCH);

    Map<String, Integer> columnCountsPerRowKey = keyspace
            .prepareQuery(CF_COLUMN_RANGE_TEST)
            .getRowSlice(rowKeys)
            .withColumnRange(startCol, "z", false, -1)
            .getColumnCounts()
            .execute()
            .getResult();

    // Count of columns in the inclusive range [startCol, "z"],
    // e.g. startCol "c" gives charOffset 3 and expectedColCount 24.
    int charOffset = startCol.charAt(0) - 'a' + 1;
    int expectedColCount = 26 - charOffset + 1;

    Map<String, Integer> expected = new HashMap<String, Integer>();
    if (!rowDeleted) {
        for (String key : rowKeys) {
            expected.put(key, expectedColCount);
        }
    }

    Assert.assertEquals("expected: " + expected + " colCount: " + columnCountsPerRowKey,
            expected, columnCountsPerRowKey);
}
private void testColumnCountRowRangeAndColumnRange(boolean rowDeleted) throws Exception {
    // Pick a random start column between 'a' and 'z'.
    int rand = new Random().nextInt(26);
    char randCH = (char) ('a' + rand);
    String startColumn = String.valueOf(randCH);

    // Count of columns in the inclusive range [startColumn, "z"].
    int expectedColCount = 'z' - startColumn.charAt(0) + 1;

    List<TestTokenRange> testRanges = TestUtils.getTestTokenRanges();

    Map<String, Integer> expectedRowCounts = rowDeleted
            ? new HashMap<String, Integer>()
            : getExpectedRowCountsForTokenRanges(testRanges, expectedColCount);

    Map<String, Integer> resultRowCounts = new HashMap<String, Integer>();

    for (TestTokenRange testRange : testRanges) {
        Map<String, Integer> rowCounts = keyspace.prepareQuery(CF_COLUMN_RANGE_TEST)
                .getRowRange(null, null, testRange.startToken, testRange.endToken, -1)
                .withColumnRange(startColumn, "z", false, -1)
                .getColumnCounts()
                .execute()
                .getResult();
        resultRowCounts.putAll(rowCounts);
    }

    Assert.assertEquals(expectedRowCounts, resultRowCounts);
}
/**
 * Queries for rows given an enumerated list of Cassandra row keys.
 */
private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys,
                                  ReadConsistency consistency) {
    // Build the list of row IDs to query for.
    List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    final Rows<ByteBuffer, UUID> rows = execute(placement.getKeyspace()
                    .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKeySlice(rowIds)
                    .withColumnRange(_maxColumnsRange),
            "query %d keys from placement %s", rowIds.size(), placement.getName());

    // Track metrics
    _randomReadMeter.mark(rowIds.size());

    // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows
    // into memory at once.
    return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
/**
 * Queries for rows given an enumerated list of Cassandra row keys.
 */
private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys,
                                  ReadConsistency consistency) {
    // Build the list of row IDs to query for.
    List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace()
                    .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKeySlice(rowIds)
                    .withColumnRange(_maxColumnsRange),
            "query %d keys from placement %s", rowIds.size(), placement.getName());

    // Track metrics
    _randomReadMeter.mark(rowIds.size());

    // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows
    // into memory at once.
    return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
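Both rowQuery variants above cap the initial read with the _maxColumnsRange field, which is not shown in this section. A hedged guess at its initialization; the limit of 50 comes from the "first 50 columns" comment, the rest is an assumption:

// Assumed initialization of _maxColumnsRange, consistent with the
// "just the first 50 columns" comment in the methods above.
private final ByteBufferRange _maxColumnsRange = new RangeBuilder()
        .setLimit(50)
        .build();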
.withColumnRange(columns)
.execute().getResult();
if (rowDeleted) {
.withColumnRange(columnRange)
.execute().getResult();
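The two fragments above apply a prebuilt range object to a query whose surrounding code is not shown. A minimal sketch of how such a range is typically built and applied across a row slice; the row keys and limit are assumptions, and CF_COLUMN_RANGE_TEST follows the tests earlier in this section:

// Hypothetical context for the fragments above: one ByteBufferRange reused
// across a row slice, yielding a Rows<K, C> result.
ByteBufferRange columnRange = new RangeBuilder()
        .setLimit(100)   // assumed cap on columns per row
        .build();

Rows<String, String> rows = keyspace.prepareQuery(CF_COLUMN_RANGE_TEST)
        .getRowSlice("key1", "key2")
        .withColumnRange(columnRange)
        .execute()
        .getResult();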
@Test
public void testRowKeysColumnRangeQuery() throws Exception {
    keyspace.prepareQuery(CF_ROW_RANGE)
            .withCaching(true)
            .getRowSlice("A", "B", "C", "D")
            .withColumnRange(new CqlRangeBuilder<String>()
                    .setStart("a")
                    .setEnd("c")
                    .build())
            .execute();

    Rows<String, String> result = keyspace.prepareQuery(CF_ROW_RANGE)
            .withCaching(true)
            .getRowSlice("E", "F", "G", "H")
            .withColumnRange(new CqlRangeBuilder<String>()
                    .setStart("d")
                    .setEnd("h")
                    .build())
            .execute()
            .getResult();

    Assert.assertNotNull(result);
    Assert.assertTrue(4 == result.size());
    for (Row<String, String> row : result) {
        int rowNo = row.getKey().charAt(0) - 'A';
        Assert.assertTrue(rowNo >= 4);
        Assert.assertTrue(rowNo <= 7);
        Assert.assertTrue(5 == row.getColumns().size());
    }
}
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping
    // tombstones since we always delete smaller column values before deleting larger column
    // values--scanning will hit the max before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
@Test
public void testRowRangeColumnRangeQuery() throws Exception {
    String startToken = Murmur3Partitioner.get().getTokenForKey(StringSerializer.get().fromString("A"));
    String endToken = Murmur3Partitioner.get().getTokenForKey(StringSerializer.get().fromString("G"));

    keyspace.prepareQuery(CF_ROW_RANGE)
            .withCaching(true)
            .getRowRange(null, null, startToken, endToken, 10)
            .withColumnRange(new CqlRangeBuilder<String>()
                    .setStart("d")
                    .setEnd("h")
                    .build())
            .execute();

    Rows<String, String> result = keyspace.prepareQuery(CF_ROW_RANGE)
            .withCaching(true)
            .getRowRange(null, null, startToken, endToken, 10)
            .withColumnRange(new CqlRangeBuilder<String>()
                    .setStart("d")
                    .setEnd("h")
                    .build())
            .execute()
            .getResult();

    Assert.assertNotNull(result);
    Assert.assertTrue(3 == result.size());
    for (Row<String, String> row : result) {
        Assert.assertTrue(5 == row.getColumns().size());
    }
}