@Test(groups = "short")
@CassandraVersion(value = "2.0.0")
public void should_reuse_wrapped_bound_statement_for_multipage_query() {
  // Reset the counter of custom statements observed by the test load-balancing policy.
  loadBalancingPolicy.customStatementsHandled.set(0);
  // Insert 100 rows under one key so the SELECT below spans many pages.
  for (int v = 1; v <= 100; v++)
    session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_prepared_multipage", v));
  PreparedStatement ps = session().prepare(SELECT_MULTIPAGE_QUERY);
  BoundStatement bs = ps.bind("key_prepared_multipage");
  // One row per page forces a background fetch for every subsequent row.
  bs.setFetchSize(1);
  CustomStatement customStatement = new CustomStatement(bs);
  ResultSet rs = session().execute(customStatement);
  // First page must have been routed through the policy as the wrapper statement.
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);
  Iterator<Row> it = rs.iterator();
  assertThat(it.hasNext()).isTrue();
  it.next();
  assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement);
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);
  assertThat(it.hasNext()).isTrue();
  it.next();
  // The automatically fetched second page must reuse the same wrapper (counter reaches 2).
  assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement);
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2);
}
/**
 * Validates that {@link PagingState} can be reused with the same {@link BoundStatement}.
 *
 * @test_category paging
 * @expected_result {@link ResultSet} from the query with the provided paging state starts from
 *     the subsequent row from the first query.
 */
@Test(groups = "short")
@CassandraVersion("2.0.0")
public void should_be_able_to_use_state_with_bound_statement() {
  PreparedStatement prepared = session().prepare("SELECT v from test where k=?");
  BoundStatement bs = prepared.bind(KEY);
  // First execution: fetch size 20 caps how many rows arrive in the first page.
  ResultSet result = session().execute(bs.setFetchSize(20));
  int pageSize = result.getAvailableWithoutFetching();
  PagingState pagingState = result.getExecutionInfo().getPagingState();
  // Re-execute the SAME bound statement, resuming from the captured paging state.
  result = session().execute(bs.setFetchSize(20).setPagingState(pagingState));
  // We have the result starting from the next page we stopped
  // (v values appear to be sequential, so the first row of page two carries v == pageSize).
  assertThat(result.one().getInt("v")).isEqualTo(pageSize);
}
/**
 * Validates that {@link PagingState} cannot be reused with a different {@link BoundStatement}
 * than the original, even if its source {@link PreparedStatement} was the same.
 *
 * @test_category paging
 * @expected_result A failure is thrown when setting paging state on a different {@link
 *     BoundStatement}.
 */
@Test(
    groups = "short",
    expectedExceptions = {PagingStateException.class})
@CassandraVersion("2.0.0")
public void should_not_be_able_to_use_state_with_different_bound_statement() {
  PreparedStatement prepared = session().prepare("SELECT v from test where k=?");
  BoundStatement bs0 = prepared.bind(KEY);
  ResultSet result = session().execute(bs0.setFetchSize(20));
  // Paging state captured from the first statement's execution.
  PagingState pagingState = result.getExecutionInfo().getPagingState();
  // A statement bound with a different key must reject the foreign paging state;
  // the expectedExceptions annotation above asserts PagingStateException is thrown here.
  BoundStatement bs1 = prepared.bind("different_key");
  session().execute(bs1.setFetchSize(20).setPagingState(pagingState));
}
@Override
void configure(BoundStatement boundStatement) {
  // Apply the configured fetch size to the statement under test.
  // NOTE(review): `fetchSize` is a field of the enclosing class (not visible here) — confirm origin.
  boundStatement.setFetchSize(fetchSize);
}
}
// Fetch size of 2 guarantees the result set spans multiple pages.
ResultSet rows = session().execute(ps.bind().setFetchSize(2));
assertThat(rows.isFullyFetched()).isFalse();
// Snapshot the result-set metadata id — presumably compared later (after a schema/metadata
// change) to detect re-preparation; confirm against the rest of the enclosing test.
MD5Digest idBefore = ps.getPreparedId().resultSetMetadata.id;
/**
 * Executes the current CQL statement on the connection's session and captures the outcome in
 * {@code currentResultSet}.
 *
 * <p>NOTE(review): every failure — including non-transient ones — is wrapped in
 * {@link SQLTransientException}, so callers cannot distinguish retryable errors.
 *
 * @throws SQLException if execution fails
 */
private void doExecute() throws SQLException {
  if (LOG.isTraceEnabled()) LOG.trace("CQL: " + cql);
  try {
    // Discard any result state left over from a previous execution.
    resetResults();
    // Duplicate console output is an intentional debug aid gated by connection.debugMode.
    if (this.connection.debugMode) System.out.println("CQL: "+ cql);
    if(this.statement.getFetchSize()==0)
      // force paging to avoid timeout and node harm...
      this.statement.setFetchSize(100);
    this.statement.setConsistencyLevel(this.connection.defaultConsistencyLevel);
    for(int i=0; i<this.statement.preparedStatement().getVariables().size(); i++){
      // Set parameters to null if unset
      if(!this.statement.isSet(i)){
        this.statement.setToNull(i);
      }
    }
    currentResultSet = new CassandraResultSet(this, this.connection.getSession().execute(this.statement));
  } catch (Exception e) {
    // Preserve the original exception as the cause.
    throw new SQLTransientException(e);
  }
}
@Test(groups = "short")
@CassandraVersion(value = "2.0.0")
public void should_reuse_wrapped_bound_statement_for_multipage_query() {
  // Start from a clean count of custom statements seen by the test policy.
  loadBalancingPolicy.customStatementsHandled.set(0);
  // Populate 100 values under a single key so the query pages several times.
  for (int value = 1; value <= 100; value++) {
    session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_prepared_multipage", value));
  }
  PreparedStatement prepared = session().prepare(SELECT_MULTIPAGE_QUERY);
  BoundStatement bound = prepared.bind("key_prepared_multipage");
  bound.setFetchSize(1); // one row per page => every extra row triggers a fetch
  CustomStatement wrapper = new CustomStatement(bound);
  ResultSet resultSet = session().execute(wrapper);
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);
  Iterator<Row> rows = resultSet.iterator();
  // First page: the wrapper itself must be what the driver executed.
  assertThat(rows.hasNext()).isTrue();
  rows.next();
  assertThat(resultSet.getExecutionInfo().getStatement()).isEqualTo(wrapper);
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1);
  // Second page: the same wrapper must be reused for the follow-up fetch.
  assertThat(rows.hasNext()).isTrue();
  rows.next();
  assertThat(resultSet.getExecutionInfo().getStatement()).isEqualTo(wrapper);
  assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2);
}
ListenableFuture<Map<Long, Long>> getTraceIdsByServiceNames(List<String> serviceNames, long endTs, long lookback, int limit) { if (serviceNames.isEmpty()) return immediateFuture(Collections.<Long, Long>emptyMap()); long startTs = Math.max(endTs - lookback, 0); // >= 1970 try { // This guards use of "in" query to give people a little more time to move off Cassandra 2.1 // Note that it will still fail when serviceNames.size() > 1 BoundStatement bound = serviceNames.size() == 1 ? CassandraUtil.bindWithName(selectTraceIdsByServiceName, "select-trace-ids-by-service-name") .setString("service_name", serviceNames.get(0)) .setSet("bucket", buckets) .setBytesUnsafe("start_ts", timestampCodec.serialize(startTs)) .setBytesUnsafe("end_ts", timestampCodec.serialize(endTs)) .setInt("limit_", limit) : CassandraUtil.bindWithName(selectTraceIdsByServiceNames, "select-trace-ids-by-service-names") .setList("service_name", serviceNames) .setSet("bucket", buckets) .setBytesUnsafe("start_ts", timestampCodec.serialize(startTs)) .setBytesUnsafe("end_ts", timestampCodec.serialize(endTs)) .setInt("limit_", limit); bound.setFetchSize(Integer.MAX_VALUE); return transform(session.executeAsync(bound), traceIdToTimestamp); } catch (RuntimeException ex) { return immediateFailedFuture(ex); } }
/** * Validates that {@link PagingState} can be reused with the same {@link BoundStatement}. * * @test_category paging * @expected_result {@link ResultSet} from the query with the provided paging state starts from * the subsequent row from the first query. */ @Test(groups = "short") @CassandraVersion("2.0.0") public void should_be_able_to_use_state_with_bound_statement() { PreparedStatement prepared = session().prepare("SELECT v from test where k=?"); BoundStatement bs = prepared.bind(KEY); ResultSet result = session().execute(bs.setFetchSize(20)); int pageSize = result.getAvailableWithoutFetching(); PagingState pagingState = result.getExecutionInfo().getPagingState(); result = session().execute(bs.setFetchSize(20).setPagingState(pagingState)); // We have the result starting from the next page we stopped assertThat(result.one().getInt("v")).isEqualTo(pageSize); }
/**
 * Ensures a {@link PagingState} is rejected when attached to a {@link BoundStatement} other
 * than the one it was captured from, even when both came from the same
 * {@link PreparedStatement}.
 *
 * @test_category paging
 * @expected_result {@link PagingStateException} when the state is applied to the other statement.
 */
@Test(
    groups = "short",
    expectedExceptions = {PagingStateException.class})
@CassandraVersion("2.0.0")
public void should_not_be_able_to_use_state_with_different_bound_statement() {
  PreparedStatement ps = session().prepare("SELECT v from test where k=?");
  BoundStatement original = ps.bind(KEY);
  ResultSet firstResult = session().execute(original.setFetchSize(20));
  PagingState state = firstResult.getExecutionInfo().getPagingState();
  // Applying the captured state to a statement bound with a different key must throw.
  BoundStatement other = ps.bind("different_key");
  session().execute(other.setFetchSize(20).setPagingState(state));
}
/**
 * Streams string data points for the given metric between startTime and endTime.
 *
 * <p>Four prepared statements cover the combinations of ordering (ASC vs. the default variant)
 * and an optional row limit; {@code limit <= 0} selects the unlimited variant.
 * NOTE(review): range inclusivity is determined by the prepared queries, not visible here.
 */
@Override
public Observable<Row> findStringData(MetricId<String> id, long startTime, long endTime, int limit,
    Order order, int pageSize) {
  if (order == Order.ASC) {
    if (limit <= 0) {
      // Ascending, no limit.
      return rxSession.executeAndFetch(findStringDataByDateRangeExclusiveASC.bind(id.getTenantId(),
          STRING.getCode(), id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime))
          .setFetchSize(pageSize));
    } else {
      // Ascending, limited.
      return rxSession.executeAndFetch(findStringDataByDateRangeExclusiveWithLimitASC.bind(
          id.getTenantId(), STRING.getCode(), id.getName(), DPART, getTimeUUID(startTime),
          getTimeUUID(endTime), limit).setFetchSize(pageSize));
    }
  } else {
    if (limit <= 0) {
      // Default order, no limit.
      return rxSession.executeAndFetch(findStringDataByDateRangeExclusive.bind(id.getTenantId(),
          STRING.getCode(), id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime))
          .setFetchSize(pageSize));
    } else {
      // Default order, limited.
      return rxSession.executeAndFetch(findStringDataByDateRangeExclusiveWithLimit.bind(id.getTenantId(),
          STRING.getCode(), id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime),
          limit).setFetchSize(pageSize));
    }
  }
}
/**
 * Streams string data points for a metric over a time range, choosing among four prepared
 * statements based on the requested ordering and whether a positive row limit was supplied.
 */
@Override
public Observable<Row> findStringData(MetricId<String> id, long startTime, long endTime, int limit,
    Order order, int pageSize) {
  boolean bounded = limit > 0; // limit <= 0 means "no limit" variant
  BoundStatement query;
  if (order == Order.ASC) {
    query = bounded
        ? findStringDataByDateRangeExclusiveWithLimitASC.bind(id.getTenantId(), STRING.getCode(),
            id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime), limit)
        : findStringDataByDateRangeExclusiveASC.bind(id.getTenantId(), STRING.getCode(),
            id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime));
  } else {
    query = bounded
        ? findStringDataByDateRangeExclusiveWithLimit.bind(id.getTenantId(), STRING.getCode(),
            id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime), limit)
        : findStringDataByDateRangeExclusive.bind(id.getTenantId(), STRING.getCode(),
            id.getName(), DPART, getTimeUUID(startTime), getTimeUUID(endTime));
  }
  return rxSession.executeAndFetch(query.setFetchSize(pageSize));
}
@Override protected ResultSetFuture newFuture() { Statement bound = factory .preparedStatement .bind() .setString("service_name", input.service_name()) .setBytesUnsafe("start_ts", factory.timestampCodec.serialize(input.start_ts())) .setBytesUnsafe("end_ts", factory.timestampCodec.serialize(input.end_ts())) .setInt("limit_", input.limit_()) .setFetchSize(Integer.MAX_VALUE); // NOTE in the new driver, we also set this to limit return factory.session.executeAsync(bound); }
/**
 * Binds this call's parameters and issues the query asynchronously.
 * Fetch size is Integer.MAX_VALUE so the driver does not page; the "limit_" bind parameter
 * is what actually caps the rows returned.
 */
@Override
protected ResultSetFuture newFuture() {
  Statement bound =
      factory
          .preparedStatement
          .bind()
          .setString("service_span_name", input.service_span_name())
          .setBytesUnsafe("start_ts", factory.timestampCodec.serialize(input.start_ts()))
          .setBytesUnsafe("end_ts", factory.timestampCodec.serialize(input.end_ts()))
          .setInt("limit_", input.limit_())
          .setFetchSize(Integer.MAX_VALUE); // NOTE in the new driver, we also set this to limit
  return factory.session.executeAsync(bound);
}
/**
 * Binds the list of service names plus the time window and limit, then issues the query.
 * Fetch size is Integer.MAX_VALUE so the driver does not page; "limit_" caps the rows.
 */
@Override
protected ResultSetFuture newFuture() {
  Statement bound =
      factory
          .preparedStatement
          .bind()
          .setList("service_names", input.service_names())
          .setBytesUnsafe("start_ts", factory.timestampCodec.serialize(input.start_ts()))
          .setBytesUnsafe("end_ts", factory.timestampCodec.serialize(input.end_ts()))
          .setInt("limit_", input.limit_())
          .setFetchSize(Integer.MAX_VALUE); // NOTE in the new driver, we also set this to limit
  return factory.session.executeAsync(bound);
}
/**
 * Returns an iterator over all keys matching the slice query.
 *
 * @throws PermanentBackendException if the cluster supports ordered scans (i.e. is not using
 *     a random partitioner such as md5 or murmur3)
 */
@Override
public KeyIterator getKeys(final SliceQuery query, final StoreTransaction txh) throws BackendException {
  if (this.storeManager.getFeatures().hasOrderedScan()) {
    throw new PermanentBackendException("This operation is only allowed when a random partitioner (md5 or murmur3) is used.");
  }

  // Execute inside Try so any driver exception is translated via EXCEPTION_MAPPER.
  return Try.of(() -> new CQLResultSetKeyIterator(
      query,
      this.getter,
      this.session.execute(this.getKeysAll.bind()
          .setBytes(SLICE_START_BINDING, query.getSliceStart().asByteBuffer())
          .setBytes(SLICE_END_BINDING, query.getSliceEnd().asByteBuffer())
          // Page through results rather than materializing the whole key set.
          .setFetchSize(this.storeManager.getPageSize())
          .setConsistencyLevel(getTransaction(txh).getReadConsistencyLevel()))))
      .getOrElseThrow(EXCEPTION_MAPPER);
}
}
/**
 * Serializes the annotation to bytes, binds the time window and limit, and issues the query.
 * Fetch size is Integer.MAX_VALUE so the driver does not page; "limit_" caps the rows.
 */
@Override
protected ResultSetFuture newFuture() {
  ByteBuffer annotation = CassandraUtil.toByteBuffer(input.annotation());
  Statement bound =
      factory
          .preparedStatement
          .bind()
          .setBytes("annotation", annotation)
          .setBytesUnsafe("start_ts", factory.timestampCodec.serialize(input.start_ts()))
          .setBytesUnsafe("end_ts", factory.timestampCodec.serialize(input.end_ts()))
          .setInt("limit_", input.limit_())
          .setFetchSize(Integer.MAX_VALUE); // NOTE in the new driver, we also set this to limit
  return factory.session.executeAsync(bound);
}
/**
 * Binds the query parameters, leaving the optional ones (l_service, annotation_query) unset
 * when the input does not provide them.
 * NOTE(review): fetch size equals the query limit, so at most one page is fetched.
 */
@Override
protected ResultSetFuture newFuture() {
  BoundStatement bound = preparedStatement.bind();
  if (input.l_service() != null) bound.setString("l_service", input.l_service());
  if (input.annotation_query() != null) {
    bound.setString("annotation_query", input.annotation_query());
  }
  bound
      .setUUID("start_ts", input.start_ts())
      .setUUID("end_ts", input.end_ts())
      .setInt("limit_", input.limit_())
      .setFetchSize(input.limit_());
  return factory.session.executeAsync(bound);
}
/**
 * Returns an iterator over keys between keyStart and keyEnd for the slice query (range
 * inclusivity is determined by the prepared query, not visible here). Keys are converted to
 * tokens for the range predicate, which is why an order-preserving partitioner is required.
 *
 * @throws PermanentBackendException if the cluster does not use the byte-ordered partitioner
 */
@Override
public KeyIterator getKeys(final KeyRangeQuery query, final StoreTransaction txh) throws BackendException {
  if (!this.storeManager.getFeatures().hasOrderedScan()) {
    throw new PermanentBackendException("This operation is only allowed when the byteorderedpartitioner is used.");
  }

  final Metadata metadata = this.session.getCluster().getMetadata();
  // Execute inside Try so any driver exception is translated via EXCEPTION_MAPPER.
  return Try.of(() -> new CQLResultSetKeyIterator(
      query,
      this.getter,
      this.session.execute(this.getKeysRanged.bind()
          .setToken(KEY_START_BINDING, metadata.newToken(query.getKeyStart().asByteBuffer()))
          .setToken(KEY_END_BINDING, metadata.newToken(query.getKeyEnd().asByteBuffer()))
          .setBytes(SLICE_START_BINDING, query.getSliceStart().asByteBuffer())
          .setBytes(SLICE_END_BINDING, query.getSliceEnd().asByteBuffer())
          // Page through results rather than materializing the whole key range.
          .setFetchSize(this.storeManager.getPageSize())
          .setConsistencyLevel(getTransaction(txh).getReadConsistencyLevel()))))
      .getOrElseThrow(EXCEPTION_MAPPER);
}
/**
 * Binds service/span/bucket plus optional duration bounds, then runs the query asynchronously.
 * Fetch size equals the limit, so the driver returns everything in a single page.
 */
@Override
protected ResultSetFuture newFuture() {
  BoundStatement stmt = preparedStatement
      .bind()
      .setString("service", input.service())
      .setString("span", input.span())
      .setInt("bucket", input.bucket());
  // Duration bounds are optional; bind them only when the input supplies a start duration.
  if (input.start_duration() != null) {
    stmt.setLong("start_duration", input.start_duration());
    stmt.setLong("end_duration", input.end_duration());
  }
  stmt.setUUID("start_ts", input.start_ts());
  stmt.setUUID("end_ts", input.end_ts());
  stmt.setInt("limit_", input.limit_());
  stmt.setFetchSize(input.limit_());
  return factory.session.executeAsync(stmt);
}