/** A row limit of zero must be rejected: limits are required to be strictly positive. */
@Test
public void zeroLimitNotAllowed() {
  expectedException.expect(IllegalArgumentException.class);
  Options.limit(0);
}
/** A negative row limit must be rejected: limits are required to be strictly positive. */
@Test
public void negativeLimitsNotAllowed() {
  expectedException.expect(IllegalArgumentException.class);
  Options.limit(-1);
}
@Override // Since Options mandates checking hasXX() before XX() is called, the equals & hashCode look more // complicated than usual. public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Options that = (Options) o; return (!hasLimit() && !that.hasLimit() || hasLimit() && that.hasLimit() && Objects.equals(limit(), that.limit())) && (!hasPrefetchChunks() && !that.hasPrefetchChunks() || hasPrefetchChunks() && that.hasPrefetchChunks() && Objects.equals(prefetchChunks(), that.prefetchChunks())) && (!hasPageSize() && !that.hasPageSize() || hasPageSize() && that.hasPageSize() && Objects.equals(pageSize(), that.pageSize())) && Objects.equals(pageToken(), that.pageToken()) && Objects.equals(filter(), that.filter()); }
/**
 * Scans up to {@code recordCount} rows starting at {@code startKey}, appending each decoded row
 * to {@code result}.
 *
 * <p>When the client is configured to serve reads through SQL queries, the work is delegated to
 * {@code scanUsingQuery}; otherwise the read API is used with a key range plus a row limit.
 * Returns {@code Status.OK} on success and {@code Status.ERROR} on any failure.
 */
@Override
public Status scan(
    String table,
    String startKey,
    int recordCount,
    Set<String> fields,
    Vector<HashMap<String, ByteIterator>> result) {
  if (queriesForReads) {
    return scanUsingQuery(table, startKey, recordCount, fields, result);
  }
  // A null field set means "read the standard column set".
  Iterable<String> columns = (fields == null) ? STANDARD_FIELDS : fields;
  // Key.of() with no parts is the empty key, so this range runs from startKey through the end of
  // the table; Options.limit caps how many rows actually come back.
  KeySet keySet =
      KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build();
  try (ResultSet rs =
      dbClient
          .singleUse(timestampBound)
          .read(table, keySet, columns, Options.limit(recordCount))) {
    while (rs.next()) {
      HashMap<String, ByteIterator> rowValues = new HashMap<>();
      decodeStruct(columns, rs, rowValues);
      result.add(rowValues);
    }
    return Status.OK;
  } catch (Exception e) {
    // Boundary method: report the failure through the benchmark status rather than throwing.
    LOGGER.log(Level.INFO, "scan()", e);
    return Status.ERROR;
  }
}
/** When both read options are supplied, both must report present and round-trip their values. */
@Test
public void allOptionsPresent() {
  Options options = Options.fromReadOptions(Options.limit(10), Options.prefetchChunks(1));
  assertThat(options.hasLimit()).isTrue();
  assertThat(options.limit()).isEqualTo(10);
  assertThat(options.hasPrefetchChunks()).isTrue();
  assertThat(options.prefetchChunks()).isEqualTo(1);
}
/** Pins the toString() rendering and hashCode() stability of a limit-only read Options. */
@Test
public void readOptTest() {
  int limit = 3;
  Options opts = Options.fromReadOptions(Options.limit(limit));
  // toString() renders each set field as "name: value " with a trailing space.
  assertThat(opts.toString()).isEqualTo("limit: " + limit + " ");
  // Guards against accidental changes to the hashCode computation.
  assertThat(opts.hashCode()).isEqualTo(964);
}
/** equals() on read options: empty==empty, set!=unset (both directions), values must match. */
@Test
public void readEquality() {
  Options noOptions = Options.fromReadOptions();
  assertThat(noOptions.equals(Options.fromReadOptions())).isTrue();
  Options limitOne = Options.fromReadOptions(Options.limit(1));
  // Presence mismatch is unequal regardless of comparison direction.
  assertThat(noOptions.equals(limitOne)).isFalse();
  assertThat(limitOne.equals(noOptions)).isFalse();
  // Same limit value → equal; different limit value → unequal.
  assertThat(limitOne.equals(Options.fromReadOptions(Options.limit(1)))).isTrue();
  assertThat(limitOne.equals(Options.fromReadOptions(Options.limit(2)))).isFalse();
}
// Copies the requested column names into the request builder, then attaches the row limit only
// when the caller actually set one — Options requires checking hasLimit() before limit().
// NOTE(review): fragment — the enclosing method starts before and ends after this view.
.addAllColumns(columns); if (readOptions.hasLimit()) { builder.setLimit(readOptions.limit());
@Test public void readUsingIndexInstance() { Partition p = Partition.createReadPartition( partitionToken, partitionOptions, table, index, keys, columns, readOptions); assertThat(p.getPartitionToken()).isEqualTo(partitionToken); assertThat(p.getTable()).isEqualTo(table); assertThat(p.getIndex()).isEqualTo(index); assertThat(p.getKeys()).isEqualTo(keys); assertThat(p.getColumns()).isEqualTo(columns); assertTrue(p.getReadOptions().hasLimit()); assertThat(p.getReadOptions().limit()).isEqualTo(10); assertNull(p.getStatement()); assertNull(p.getQueryOptions()); // Test serialization. reserializeAndAssert(p); }
// NOTE(review): this fragment appears garbled by extraction — `.singleUse(TimestampBound.strong())`
// is chained twice in a row (which would not compile: singleUse returns a ReadContext, not a
// DatabaseClient) and the ternary branches begin mid-expression. The apparent intent is: read via
// the ascending or descending index with an optional Options.limit, else a plain table read —
// reconstruct against the original source before editing. TODO confirm.
.singleUse(TimestampBound.strong()) .readUsingIndex( TABLE_NAME, INDEX_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) : client .singleUse(TimestampBound.strong()) .singleUse(TimestampBound.strong()) .readUsingIndex( TABLE_NAME, DESC_INDEX_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) : client .singleUse(TimestampBound.strong()) ? client .singleUse(TimestampBound.strong()) .read(TABLE_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) : client.singleUse(TimestampBound.strong()).read(TABLE_NAME, keySet, ALL_COLUMNS); break;
@Test public void readInstance() { Partition p = Partition.createReadPartition( partitionToken, partitionOptions, table, null /*index*/, keys, columns, readOptions); assertThat(p.getPartitionToken()).isEqualTo(partitionToken); assertThat(p.getTable()).isEqualTo(table); assertThat(p.getKeys()).isEqualTo(keys); assertThat(p.getColumns()).isEqualTo(columns); assertTrue(p.getReadOptions().hasLimit()); assertThat(p.getReadOptions().limit()).isEqualTo(10); assertNull(p.getIndex()); assertNull(p.getStatement()); assertNull(p.getQueryOptions()); // Test serialization. reserializeAndAssert(p); }
@Override // Since Options mandates checking hasXX() before XX() is called, the equals & hashCode look more // complicated than usual. public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Options that = (Options) o; return (!hasLimit() && !that.hasLimit() || hasLimit() && that.hasLimit() && Objects.equals(limit(), that.limit())) && (!hasPrefetchChunks() && !that.hasPrefetchChunks() || hasPrefetchChunks() && that.hasPrefetchChunks() && Objects.equals(prefetchChunks(), that.prefetchChunks())) && (!hasPageSize() && !that.hasPageSize() || hasPageSize() && that.hasPageSize() && Objects.equals(pageSize(), that.pageSize())) && Objects.equals(pageToken(), that.pageToken()) && Objects.equals(filter(), that.filter()); }
// Copies the requested column names into the request builder, then attaches the row limit only
// when the caller actually set one — Options requires checking hasLimit() before limit().
// NOTE(review): fragment — the enclosing method starts before and ends after this view.
.addAllColumns(columns); if (readOptions.hasLimit()) { builder.setLimit(readOptions.limit());