@Override public boolean isCheckpointCommitted(int subtaskIdx, long checkpointId) { // Pending checkpointed buffers are committed in ascending order of their // checkpoint id. This way we can tell if a checkpointed buffer was committed // just by asking the third-party storage system for the last checkpoint id // committed by the specified subtask. Long lastCommittedCheckpoint = lastCommittedCheckpoints.get(subtaskIdx); if (lastCommittedCheckpoint == null) { String statement = String.format( "SELECT checkpoint_id FROM %s.%s where sink_id='%s' and sub_id=%d;", keySpace, table, operatorId, subtaskIdx); Iterator<Row> resultIt = session.execute(statement).iterator(); if (resultIt.hasNext()) { lastCommittedCheckpoint = resultIt.next().getLong("checkpoint_id"); lastCommittedCheckpoints.put(subtaskIdx, lastCommittedCheckpoint); } } return lastCommittedCheckpoint != null && checkpointId <= lastCommittedCheckpoint; } }
// Iterate the driver result set explicitly; the fully qualified Row type
// avoids a clash with any other Row class imported in this file.
Iterator<com.datastax.driver.core.Row> results = resultSet.iterator();
/**
 * Returns a page of keys stored for the given map scope.
 *
 * @param scope  the map whose keys are listed
 * @param cursor serialized {@link PagingState} from a previous call, or blank
 *               for the first page
 * @param limit  maximum number of keys to return (also used as the fetch size)
 * @return the keys plus a cursor for the next page ({@code null} when the
 *         driver reports no further paging state)
 */
@Override
public MapKeyResults getAllKeys(final MapScope scope, final String cursor, final int limit) {

    final int[] buckets = BUCKET_LOCATOR.getAllBuckets(scope.getName());

    // One partition key per bucket. Size the list from the array we actually
    // iterate (the original sized it from NUM_BUCKETS.length, which was only a
    // capacity hint but inconsistent with the loop below).
    final List<ByteBuffer> partitionKeys = new ArrayList<>(buckets.length);
    for (int bucket : buckets) {
        partitionKeys.add(getMapKeyPartitionKey(scope, bucket));
    }

    final Clause in = QueryBuilder.in("key", partitionKeys);

    // Build the select once; only the paging state differs between the first
    // page and subsequent pages.
    Statement statement = QueryBuilder.select().all().from(MAP_KEYS_TABLE)
            .where(in)
            .setFetchSize(limit);
    if (!isBlank(cursor)) {
        statement = statement.setPagingState(PagingState.fromString(cursor));
    }

    final ResultSet resultSet = session.execute(statement);
    final PagingState pagingState = resultSet.getExecutionInfo().getPagingState();

    final List<String> keys = new ArrayList<>();
    final Iterator<Row> resultIterator = resultSet.iterator();
    int size = 0;
    // Stop at 'limit' rows even if the driver has buffered more, so the
    // returned cursor stays aligned with what the caller has consumed.
    while (resultIterator.hasNext() && size < limit) {
        size++;
        keys.add((String) DataType.text().deserialize(
                resultIterator.next().getBytes("column1"),
                ProtocolVersion.NEWEST_SUPPORTED));
    }

    return new MapKeyResults(pagingState != null ? pagingState.toString() : null, keys);
}
// Block (uninterruptibly) for the async query result; bail out early when the
// query produced nothing — a null result set or one with no rows.
ResultSet resSet = fut.getUninterruptibly();
if (resSet == null || !resSet.iterator().hasNext())
    return;
@Override
public Object generate(Client<?> client, String dataType) {
    // Ask the Cassandra coordinator for a fresh time-based UUID: now() is
    // evaluated server-side. Only the first row of the result is needed.
    final String cql = "Select now() from system_schema.columns";
    ResultSet rows = ((DSClient) client).execute(cql, null);
    return rows.iterator().next().getUUID(0);
}
}
/**
 * Converts every row of the given result set into a map keyed by column name
 * (values translated via {@code DSClientUtilities.assign}) and collects the
 * maps into a list.
 *
 * @param rSet
 *            the driver result set to drain
 * @return one map of column name to value per row, in result-set order
 */
private List iterateAndReturn(ResultSet rSet)
{
    List<Map<String, Object>> results = new ArrayList<>();
    // ResultSet and ColumnDefinitions are both Iterable, so enhanced for
    // loops replace the original manual Iterator plumbing; the raw return
    // type is kept for interface compatibility.
    for (Row row : rSet)
    {
        Map<String, Object> rowData = new HashMap<>();
        for (Definition columnDef : row.getColumnDefinitions())
        {
            rowData.put(columnDef.getName(), DSClientUtilities.assign(row, null, null,
                    columnDef.getType().getName(), null, columnDef.getName(), null, null));
        }
        results.add(rowData);
    }
    return results;
}
/**
 * Looks up element ids in the name-index table for an exact name match.
 * Returns an empty set when the name is empty or has no index entry.
 */
private static Set<Integer> queryByNameIndex(CassandraSessionPool.Session session,
                                             String table, String name) {
    // An empty name cannot match anything — skip the round trip.
    if (name.isEmpty()) {
        return ImmutableSet.of();
    }

    Select select = QueryBuilder.select().from(table);
    select.where(CassandraTable.formatEQ(HugeKeys.NAME, name));

    try {
        Iterator<Row> rows = session.execute(select).iterator();
        if (!rows.hasNext()) {
            return ImmutableSet.of();
        }
        Set<Integer> elementIds = rows.next().getSet(ELEMENT_IDS, Integer.class);
        // The name index is expected to hold at most one row per name.
        assert !rows.hasNext();
        return elementIds;
    } catch (DriverException e) {
        throw new BackendException("Failed to query by name '%s'", e, name);
    }
}
}
/**
 * Maps each row of the result set to a populated entity (including any
 * relational values) and returns the entities in result-set order.
 *
 * @param rSet
 *            the driver result set to drain
 * @param metadata
 *            metadata of the entity class being materialized
 * @return the populated entities
 */
private List iterateAndReturn(ResultSet rSet, EntityMetadata metadata)
{
    MetamodelImpl metaModel = (MetamodelImpl) kunderaMetadata.getApplicationMetadata()
            .getMetamodel(metadata.getPersistenceUnit());
    EntityType entityType = metaModel.entity(metadata.getEntityClazz());

    List results = new ArrayList();
    Map<String, Object> relationalValues = new HashMap<String, Object>();
    // ResultSet is Iterable<Row>, so iterate it directly.
    for (Row row : rSet)
    {
        // A fresh null entity per row; populateObjectFromRow builds it and
        // adds it to 'results'.
        Object entity = null;
        populateObjectFromRow(metadata, metaModel, entityType, results, relationalValues,
                entity, row);
    }
    return results;
}
/**
 * Looks up element ids in the label-index table for the given label id.
 * Returns an empty set when the label has no index entry.
 */
private static Set<String> queryByLabelIndex(CassandraSessionPool.Session session,
                                             String table, Id label) {
    Select select = QueryBuilder.select().from(table);
    select.where(CassandraTable.formatEQ(HugeKeys.LABEL, label.asLong()));

    try {
        Iterator<Row> rows = session.execute(select).iterator();
        if (!rows.hasNext()) {
            return ImmutableSet.of();
        }
        Set<String> elementIds = rows.next().getSet(ELEMENT_IDS, String.class);
        // The label index is expected to hold at most one row per label.
        assert !rows.hasNext();
        return elementIds;
    } catch (DriverException e) {
        throw new BackendException("Failed to query by label '%s'", e, label);
    }
}
/**
 * Returns the next page of up to {@code fetchSize} entities for this
 * iterator, executing the underlying CQL3 query lazily on the first call.
 *
 * @param m      metadata of the entity class being materialized
 * @param client the Kundera client (expected to be a DSClient)
 * @return the entities populated from the next batch of rows
 * @throws Exception if query translation or execution fails
 */
@Override
protected List<E> populateEntities(EntityMetadata m, Client client) throws Exception
{
    int count = 0;
    List results = new ArrayList();
    MetamodelImpl metaModel = (MetamodelImpl) kunderaMetadata.getApplicationMetadata().getMetamodel(
            m.getPersistenceUnit());
    EntityType entityType = metaModel.entity(entityMetadata.getEntityClazz());
    Map<String, Object> relationalValues = new HashMap<String, Object>();
    // Lazily execute the query on the first call; the result set and row
    // iterator are kept in fields so subsequent calls resume where this
    // page stopped.
    if (rSet == null)
    {
        String parsedQuery = query.onQueryOverCQL3(m, client, metaModel, null);
        Statement statement = new SimpleStatement(parsedQuery);
        statement.setFetchSize(fetchSize);
        rSet = ((DSClient) client).executeStatement(statement);
        rowIter = rSet.iterator();
    }
    // Drain at most fetchSize rows from the shared iterator. Note hasNext()
    // is evaluated first (it may transparently fetch the next page) and
    // count is post-incremented as part of the bound check.
    while (rowIter.hasNext() && count++ < fetchSize)
    {
        Object entity = null;
        Row row = rowIter.next();
        ((DSClient) client).populateObjectFromRow(entityMetadata, metaModel, entityType, results,
                relationalValues, entity, row);
    }
    return results;
}
@Test(groups = "short")
@CassandraVersion(value = "2.0.0")
public void should_reuse_wrapped_simple_statement_for_multipage_query() {
    loadBalancingPolicy.customStatementsHandled.set(0);

    // Seed 100 rows under a single partition key.
    for (int value = 1; value <= 100; value++)
        session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_simple_multipage", value));

    SimpleStatement inner = new SimpleStatement(SELECT_MULTIPAGE_QUERY, "key_simple_multipage");
    inner.setFetchSize(1); // one row per page, forcing background fetches
    CustomStatement wrapper = new CustomStatement(inner);

    ResultSet rs = session().execute(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);

    Iterator<Row> rows = rs.iterator();

    // Consuming the first row does not trigger another execution; the
    // execution info still reports the wrapping statement.
    assertThat(rows.hasNext()).isTrue();
    rows.next();
    assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);

    // The second row requires a new page; the wrapper must be reused for it.
    assertThat(rows.hasNext()).isTrue();
    rows.next();
    assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2);
}
@Test(groups = "short")
@CassandraVersion(value = "2.0.0")
public void should_reuse_wrapped_bound_statement_for_multipage_query() {
    loadBalancingPolicy.customStatementsHandled.set(0);

    // Seed 100 rows under a single partition key.
    for (int value = 1; value <= 100; value++)
        session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_prepared_multipage", value));

    PreparedStatement prepared = session().prepare(SELECT_MULTIPAGE_QUERY);
    BoundStatement bound = prepared.bind("key_prepared_multipage");
    bound.setFetchSize(1); // one row per page, forcing background fetches
    CustomStatement wrapper = new CustomStatement(bound);

    ResultSet rs = session().execute(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);

    Iterator<Row> rows = rs.iterator();

    // Consuming the first row does not trigger another execution; the
    // execution info still reports the wrapping statement.
    assertThat(rows.hasNext()).isTrue();
    rows.next();
    assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);

    // The second row requires a new page; the wrapper must be reused for it.
    assertThat(rows.hasNext()).isTrue();
    rows.next();
    assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(wrapper);
    assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2);
}
/**
 * Wraps a driver ResultSet as an iterator of merged BackendEntry objects.
 *
 * @param results the executed result set to iterate
 * @param query   the originating query (provides offset/limit/paging info)
 * @param merger  folds each Row into the current BackendEntry, returning the
 *                entry to continue accumulating into
 */
public CassandraEntryIterator(ResultSet results, Query query,
       BiFunction<BackendEntry, Row, BackendEntry> merger) {
    super(query);
    this.results = results;
    this.rows = results.iterator();
    // Rows buffered locally, i.e. available without another server fetch.
    this.remaining = results.getAvailableWithoutFetching();
    this.merger = merger;
    this.next = null;

    // Skip query.offset() leading rows before the first next() call.
    this.skipOffset();

    if (query.paging()) {
        // In paging mode a fetched page must contain exactly query.limit()
        // rows, unless this is the final (possibly short) page.
        E.checkState(this.remaining == query.limit() ||
                     results.isFullyFetched(),
                     "Unexpected fetched page size: %s", this.remaining);
    }
}
/**
 * Populates data from secondary tables of the entity for the given row key.
 *
 * @param rowId
 *            the row id
 * @param entity
 *            the entity being populated (mutated via iteratorColumns)
 * @param metaModel
 *            the meta model
 * @param metadata
 *            the metadata
 */
private void populateSecondaryTableData(Object rowId, Object entity, MetamodelImpl metaModel,
        EntityMetadata metadata)
{
    AbstractManagedType managedType = (AbstractManagedType) metaModel.entity(metadata.getEntityClazz());
    List<String> secondaryTables = ((DefaultEntityAnnotationProcessor) managedType.getEntityAnnotation())
            .getSecondaryTablesName();

    for (String tableName : secondaryTables)
    {
        StringBuilder builder = createSelectQuery(rowId, metadata, tableName);
        ResultSet rSet = this.execute(builder.toString(), null);
        Iterator<Row> rowIter = rSet.iterator();
        // Fix: the original called next() unconditionally and threw
        // NoSuchElementException when a secondary table had no row for this
        // key; such tables are now simply skipped.
        if (!rowIter.hasNext())
        {
            continue;
        }
        Row row = rowIter.next();
        ColumnDefinitions columnDefs = row.getColumnDefinitions();
        Iterator<Definition> columnDefIter = columnDefs.iterator();
        entity = iteratorColumns(metadata, metaModel, metaModel.entity(metadata.getEntityClazz()),
                new HashMap<String, Object>(), entity, row, columnDefIter);
    }
}
@Test(groups = "short")
public void should_ignore_read_timeout() {
    // Make host 1 answer reads with a read_request_timeout error.
    simulateError(1, read_request_timeout);
    ResultSet rs = query();
    assertThat(rs.iterator().hasNext()).isFalse(); // ignore decisions produce empty result sets
    assertOnReadTimeoutWasCalled(1);
    // Exactly one IGNORE decision is recorded, and no retries of any kind.
    assertThat(errors.getIgnores().getCount()).isEqualTo(1);
    assertThat(errors.getRetries().getCount()).isEqualTo(0);
    assertThat(errors.getIgnoresOnReadTimeout().getCount()).isEqualTo(1);
    assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0);
    // Only host 1 was queried: the IGNORE decision did not fail over.
    assertQueried(1, 1);
    assertQueried(2, 0);
    assertQueried(3, 0);
}
@Test(groups = "short") public void should_drop_a_table() { // Create a table session() .execute( SchemaBuilder.createTable("ks", "DropTable").addPartitionKey("a", DataType.cint())); // Drop the table session().execute(SchemaBuilder.dropTable("ks", "DropTable")); session().execute(SchemaBuilder.dropTable("DropTable").ifExists()); ResultSet rows = session() .execute( "SELECT columnfamily_name " + "FROM system.schema_columnfamilies " + "WHERE keyspace_name='ks' AND columnfamily_name='droptable'"); if (rows.iterator().hasNext()) { fail("This table should have been deleted"); } }
/**
 * Validates that when all results of a query are paged in through a query's
 * result set, the {@link PagingState} it returns will produce an empty set
 * when queried with.
 *
 * @test_category paging
 * @expected_result Query with the {@link PagingState} returns 0 rows.
 */
@Test(groups = "short")
public void should_return_no_rows_when_paged_to_end() {
    SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY));
    ResultSet result = session().execute(st.setFetchSize(20));

    // Consume enough of the iterator to cause all the results to be paged in.
    // NOTE(review): 83 reads at fetch size 20 presumably pulls in the final
    // page of the fixture's rows — confirm against the test setup.
    Iterator<Row> rowIt = result.iterator();
    for (int i = 0; i < 83; i++) {
        rowIt.next().getInt("v");
    }

    // Save the paging state now that the last page has been fetched.
    String savedPagingStateString = result.getExecutionInfo().getPagingState().toString();

    // Re-run the same query from the saved state: it points past the last
    // row, so no further rows should be returned.
    st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY));
    result =
        session()
            .execute(
                st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString)));
    assertThat(result.one()).isNull();
}
@Test(groups = "short") public void should_add_and_drop_a_column() { // Create a table, add a column to it with an alter table statement and delete that column session() .execute( SchemaBuilder.createTable("ks", "DropColumn") .ifNotExists() .addPartitionKey("a", DataType.cint())); // Add and then drop a column session() .execute(SchemaBuilder.alterTable("ks", "DropColumn").addColumn("b").type(DataType.cint())); session().execute(SchemaBuilder.alterTable("ks", "DropColumn").dropColumn("b")); // Check that only column a exist ResultSet rows = session() .execute( "SELECT column_name, type, validator " + "FROM system.schema_columns " + "WHERE keyspace_name='ks' AND columnfamily_name='dropcolumn'"); Iterator<Row> iterator = rows.iterator(); verifyNextColumnDefinition( iterator, "a", "partition_key", "org.apache.cassandra.db.marshal.Int32Type"); assertThat(iterator.hasNext()).isFalse(); }