final Column<UUID> column = columns.getColumnByIndex( 0 );
final Column<Boolean> column = columns.getColumnByIndex( 0 );
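The one-liners above simply read the first column of a ColumnList by position. A minimal sketch of the same read with an emptiness guard, in the spirit of the findMinRecord example further down (the keyspace and CF_STANDARD1 names are borrowed from the testColumnRangeSlice snippet below and are assumptions here, not part of the one-liners; the surrounding method is assumed to declare throws ConnectionException):

ColumnList<String> columns = keyspace.prepareQuery(CF_STANDARD1)
        .getKey("A")
        .execute()
        .getResult();

// Guard before indexing: only call getColumnByIndex(0) when the row actually
// returned columns, otherwise fall back to null (or a default).
String firstName = !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null;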
private void checkResult(ColumnList<Population> result, Population... expected) throws Exception {
    Assert.assertFalse(result.isEmpty());
    Assert.assertEquals(expected.length, result.size());
    int index = 0;
    for (Population p : expected) {
        Assert.assertEquals(p, result.getColumnByIndex(index++).getName());
    }
}
private void testRowKeysWithAllColumns(boolean rowDeleted) throws Exception {
    Set<String> rowKeys = getRandomRowKeys();
    Rows<String, String> rows = keyspace.prepareQuery(CF_COLUMN_RANGE_TEST).getRowSlice(rowKeys).execute().getResult();

    if (rowDeleted) {
        Assert.assertTrue(rows.isEmpty());
        return;
    }

    Assert.assertFalse(rows.isEmpty());
    int rowKeysSize = rowKeys.size();
    for (Row<String, String> row : rows) {
        boolean isPresent = rowKeys.remove(row.getKey());
        Assert.assertTrue("Extraneous row: " + row.getKey(), isPresent);

        ColumnList<String> colList = row.getColumns();
        Assert.assertEquals(26, colList.size());
        for (int index = 0; index < 26; index++) {
            Column<String> col = colList.getColumnByIndex(index);
            Assert.assertTrue(String.valueOf((char) ('a' + index)).equals(col.getName()));
            Assert.assertEquals(index + 1, col.getIntegerValue());
        }
    }
    Assert.assertEquals(rowKeysSize, rows.size());
}
@Nullable
@Override
public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) {
    // Use a column range with a "start" to skip past tombstones.
    ColumnList<ByteBuffer> columns = execute(_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
            .getKey(dataId)
            .withColumnRange(new RangeBuilder()
                    .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                    .setLimit(1)
                    .build()));
    return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null;
}
@Test
public void runRowCopyTest() throws Exception {
    MutationBatch m = keyspace.prepareMutationBatch();
    m.withRow(CF_ROW_COPY, 10).putColumn("c1", 1).putColumn("c2", 2);
    m.execute();

    ColumnList<String> result = keyspace.prepareQuery(CF_ROW_COPY).getRow(10).execute().getResult();
    Column<String> column = result.getColumnByIndex(0);
    Assert.assertEquals("c1", column.getName());
    Assert.assertEquals(1, column.getIntegerValue());
    column = result.getColumnByIndex(1);
    Assert.assertEquals("c2", column.getName());
    Assert.assertEquals(2, column.getIntegerValue());

    keyspace.prepareQuery(CF_ROW_COPY).getRow(10).copyTo(CF_ROW_COPY2, 11).execute();

    ColumnList<String> result2 = keyspace.prepareQuery(CF_ROW_COPY2).getRow(11).execute().getResult();
    column = result2.getColumnByIndex(0);
    Assert.assertEquals("c1", column.getName());
    Assert.assertEquals(1, column.getIntegerValue());
    column = result2.getColumnByIndex(1);
    Assert.assertEquals("c2", column.getName());
    Assert.assertEquals(2, column.getIntegerValue());
}
OperationResult<ColumnList<String>> result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
if (result.getResult().size() > 0 && !result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId()))
    throw new Exception(String.format("Lock already taken %s", lockKey));

Thread.sleep(100);

result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
if (result.getResult().size() == 1 && result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
    logger.info("Got lock " + lockKey);
    return;
@Test
public void testColumnRangeSlice() throws ConnectionException {
    OperationResult<ColumnList<String>> r1 = keyspace
            .prepareQuery(CF_STANDARD1)
            .getKey("A")
            .withColumnRange(new RangeBuilder().setStart("a").setEnd("b").setLimit(5).build())
            .execute();
    Assert.assertEquals(2, r1.getResult().size());

    OperationResult<ColumnList<String>> r2 = keyspace
            .prepareQuery(CF_STANDARD1)
            .getKey("A")
            .withColumnRange("a", null, false, 5)
            .execute();
    Assert.assertEquals(5, r2.getResult().size());
    Assert.assertEquals("a", r2.getResult().getColumnByIndex(0).getName());

    ByteBuffer EMPTY_BUFFER = ByteBuffer.wrap(new byte[0]);
    OperationResult<ColumnList<String>> r3 = keyspace
            .prepareQuery(CF_STANDARD1)
            .getKey("A")
            .withColumnRange(EMPTY_BUFFER, EMPTY_BUFFER, true, 5)
            .execute();
    Assert.assertEquals(5, r3.getResult().size());
    Assert.assertEquals("z", r3.getResult().getColumnByIndex(0).getName());
}
private StorageSummary toStorageSummary(ColumnList<Composite> columns) {
    if (columns.size() == 0) {
        return null;
    }

    // Read the summary column with the attributes, length etc.
    Column<Composite> summaryColumn = columns.getColumnByIndex(0);
    if (summaryColumn == null || !matches(summaryColumn.getName(), ColumnGroup.A, 0)) {
        return null;
    }
    StorageSummary summary = JsonHelper.fromJson(summaryColumn.getStringValue(), StorageSummary.class);

    // Check that all the chunks are available.  Some may still be in the process of being written or replicated.
    if (columns.size() < 1 + summary.getChunkCount()) {
        return null;
    }
    for (int chunkId = 0; chunkId < summary.getChunkCount(); chunkId++) {
        Column<Composite> presence = columns.getColumnByIndex(chunkId + 1);
        if (presence == null
                || !matches(presence.getName(), ColumnGroup.B, chunkId)
                || presence.getTimestamp() != summary.getTimestamp()) {
            return null;
        }
    }
    return summary;
}
C lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();
Iterator<List<Column<C>>> columnsIter = Iterators.partition(
        columnScan(row.getRawKey(), sourcePlacement, sourceCf, lastColumn, null,
private Record newRecord(Key key, ByteBuffer rowKey, ColumnList<UUID> columns, int largeRowThreshold,
                         ReadConsistency consistency, @Nullable final Instant cutoffTime) {

    Iterator<Map.Entry<UUID, Change>> changeIter = decodeChanges(getFilteredColumnIter(columns.iterator(), cutoffTime));
    Iterator<Map.Entry<UUID, Compaction>> compactionIter = decodeCompactions(getFilteredColumnIter(columns.iterator(), cutoffTime));
    Iterator<RecordEntryRawMetadata> rawMetadataIter = rawMetadata(getFilteredColumnIter(columns.iterator(), cutoffTime));

    if (columns.size() >= largeRowThreshold) {
        // A large row such that the first query likely returned only a subset of all the columns.  Lazily fetch
        // the rest while ensuring we never load all columns into memory at the same time.  The current
        // Compactor+Resolver implementation must scan the row twice: once to find compaction records and once to
        // find deltas.  So we must call columnScan() twice, once for each.
        UUID lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();

        AstyanaxTable table = (AstyanaxTable) key.getTable();
        AstyanaxStorage storage = table.getReadStorage();
        DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
        ColumnFamily<ByteBuffer, UUID> columnFamily = placement.getDeltaColumnFamily();

        // Execute the same scan 3 times, returning 3 iterators that process the results in different ways.  In
        // practice at most two of the iterators are actually consumed (one or more is ignored) so the columnScan
        // should avoid actually doing any work until the first item is fetched from the iterator.
        changeIter = Iterators.concat(changeIter, decodeChanges(
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, Long.MAX_VALUE, 1, consistency), cutoffTime)));
        compactionIter = Iterators.concat(compactionIter, decodeCompactions(
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, Long.MAX_VALUE, 1, consistency), cutoffTime)));
        rawMetadataIter = Iterators.concat(rawMetadataIter, rawMetadata(
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, Long.MAX_VALUE, 1, consistency), cutoffTime)));
    }

    return new RecordImpl(key, compactionIter, changeIter, rawMetadataIter);
}
UUID lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();
Iterator<List<Column<UUID>>> columnsIter = Iterators.partition(
        columnScan(row.getRawKey(), sourcePlacement, sourceCf, lastColumn, null,
private Record newRecord(Key key, ByteBuffer rowKey, ColumnList<DeltaKey> columns, int largeRowThreshold,
                         ReadConsistency consistency, @Nullable final Instant cutoffTime) {

    Iterator<Column<DeltaKey>> changeIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> compactionIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> rawMetadataIter = getFilteredColumnIter(columns.iterator(), cutoffTime);

    if (columns.size() >= largeRowThreshold) {
        // A large row such that the first query likely returned only a subset of all the columns.  Lazily fetch
        // the rest while ensuring we never load all columns into memory at the same time.  The current
        // Compactor+Resolver implementation must scan the row twice: once to find compaction records and once to
        // find deltas.  So we must call columnScan() twice, once for each.
        DeltaKey lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();

        AstyanaxTable table = (AstyanaxTable) key.getTable();
        AstyanaxStorage storage = table.getReadStorage();
        DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
        ColumnFamily<ByteBuffer, DeltaKey> columnFamily = placement.getBlockedDeltaColumnFamily();

        // Execute the same scan 3 times, returning 3 iterators that process the results in different ways.  In
        // practice at most two of the iterators are actually consumed (one or more is ignored) so the columnScan
        // should avoid actually doing any work until the first item is fetched from the iterator.
        changeIter = Iterators.concat(changeIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        compactionIter = Iterators.concat(compactionIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        rawMetadataIter = Iterators.concat(rawMetadataIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
    }

    Iterator<Map.Entry<UUID, Change>> deltaChangeIter = decodeChanges(
            new AstyanaxDeltaIterator(changeIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));
    Iterator<Map.Entry<UUID, Compaction>> deltaCompactionIter = decodeCompactions(
            new AstyanaxDeltaIterator(compactionIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));
    Iterator<RecordEntryRawMetadata> deltaRawMetadataIter = rawMetadata(
            new AstyanaxDeltaIterator(rawMetadataIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));

    return new RecordImpl(key, deltaCompactionIter, deltaChangeIter, deltaRawMetadataIter);
}
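The lastColumn fragments and both newRecord variants above resume a scan from the name of the last column already fetched, via getColumnByIndex(columns.size() - 1). A hedged sketch of that resume-from-last-column idea using only the plain query API seen in the other snippets; keyspace, CF_STANDARD1, PAGE_SIZE and process() are assumed placeholders, the start of each follow-up range is inclusive here, and the real code above instead passes lastColumn to its own columnScan() helper (the surrounding method is assumed to declare throws ConnectionException):

final int PAGE_SIZE = 100;
String lastSeen = null;
while (true) {
    RangeBuilder range = new RangeBuilder().setLimit(PAGE_SIZE);
    if (lastSeen != null) {
        // The range start is inclusive, so the previous page's last column comes back
        // as the first column of this page and is skipped in the loop below.
        range.setStart(lastSeen);
    }
    ColumnList<String> page = keyspace.prepareQuery(CF_STANDARD1)
            .getKey("A")
            .withColumnRange(range.build())
            .execute()
            .getResult();
    for (int i = (lastSeen == null ? 0 : 1); i < page.size(); i++) {
        process(page.getColumnByIndex(i));   // process() is a hypothetical consumer
    }
    if (page.size() < PAGE_SIZE) {
        break;   // a short page means nothing is left to read
    }
    lastSeen = page.getColumnByIndex(page.size() - 1).getName();
}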