@Override
public void init(Cluster cluster, Collection<Host> hosts) {
  // Capture the cluster-wide objects this policy needs later when building query plans.
  protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
  codecRegistry = cluster.getConfiguration().getCodecRegistry();
  clusterMetadata = cluster.getMetadata();
  // Delegate to the wrapped policy so it can run its own initialization against the same view.
  childPolicy.init(cluster, hosts);
}
newSession = newCluster.connect(keyspaceProperty.getValue()); } else { newSession = newCluster.connect(); newCluster.getConfiguration().getQueryOptions().setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel)); Metadata metadata = newCluster.getMetadata();
cluster = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts).build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis));
/**
 * Copy constructor.
 *
 * <p>Note that this is a shallow copy: the new instance shares the same policies, option
 * objects (protocol, pooling, socket, metrics, query, threading, netty) and codec registry
 * as {@code toCopy}; the options themselves are not cloned.
 *
 * @param toCopy the object to copy from.
 */
protected Configuration(Configuration toCopy) {
  this(
      toCopy.getPolicies(),
      toCopy.getProtocolOptions(),
      toCopy.getPoolingOptions(),
      toCopy.getSocketOptions(),
      toCopy.getMetricsOptions(),
      toCopy.getQueryOptions(),
      toCopy.getThreadingOptions(),
      toCopy.getNettyOptions(),
      toCopy.getCodecRegistry());
}
@Test(groups = "short") public void should_set_registry_on_nested_udts() { ResultSet rows = session().execute("SELECT c1 FROM t1 WHERE pk = 1"); Row row = rows.one(); // here the CodecRegistry will create a codec on-the-fly using the UserType received from the // resultset metadata UDTValue udt1 = row.getUDTValue("c1"); assertThat(udt1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); UDTValue udt2 = udt1.getUDTValue("f1"); assertThat(udt2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); UDTValue udt3 = udt2.getUDTValue("f2"); assertThat(udt3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); String f3 = udt3.getString("f3"); assertThat(f3).isEqualTo("foo"); }
@Test(groups = "short")
public void should_create_token_from_partition_key() {
  // Token the server computed for partition key i = 1.
  Row tokenRow = session().execute("SELECT token(i) FROM foo WHERE i = 1").one();
  Token serverToken = tokenRow.getToken(0);
  // Serialize the same key client-side and ask the metadata to build the matching token.
  ProtocolVersion protocolVersion =
      cluster().getConfiguration().getProtocolOptions().getProtocolVersion();
  Metadata metadata = cluster().getMetadata();
  assertThat(metadata.newToken(TypeCodec.cint().serialize(1, protocolVersion)))
      .isEqualTo(serverToken);
}
/** @jira_ticket JAVA-1209 */
@Test(groups = "short")
public void getProtocolVersion_should_return_version() throws InterruptedException {
  // Once the cluster is initialized, the negotiated protocol version must be exposed
  // (it used to be null before initialization completed — see JAVA-1209).
  ProtocolVersion negotiated =
      cluster().getConfiguration().getProtocolOptions().getProtocolVersion();
  assertThat(negotiated).isNotNull();
}
}
@Test(groups = "short")
public void should_set_flag_on_successful_agreement() {
  // Give the driver ample time so schema agreement is reached before the wait expires.
  ProtocolOptions options = cluster().getConfiguration().getProtocolOptions();
  options.maxSchemaAgreementWaitSeconds = 10;
  // Issue a schema-altering statement and check the agreement flag on its execution info.
  String ddl = String.format(CREATE_TABLE, COUNTER.getAndIncrement());
  ResultSet rs = session().execute(ddl);
  assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue();
}
@Test(groups = "short") public void should_rethrow_on_client_timeouts() { cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); try { scassandras } finally { cluster .getConfiguration() .getSocketOptions() .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS);
/**
 * Validates that when metrics are enabled but JMX reporting is disabled via {@link
 * Cluster.Builder#withoutJMXReporting()}, {@link Cluster#getMetrics()} is still not null but the
 * 'clusterName-metrics:name=connected-to' MBean is NOT registered: looking it up must throw
 * {@link InstanceNotFoundException} (declared as the expected exception of this test).
 *
 * @test_category metrics
 */
@Test(groups = "short", expectedExceptions = InstanceNotFoundException.class)
public void should_be_no_jmx_mbean_when_jmx_is_disabled() throws Exception {
  Cluster jmxLessCluster =
      register(
          Cluster.builder()
              .addContactPoints(getContactPoints())
              .withPort(ccm().getBinaryPort())
              .withoutJMXReporting()
              .build());
  try {
    jmxLessCluster.init();
    // Metrics themselves stay enabled; only the JMX reporter is switched off.
    assertThat(jmxLessCluster.getMetrics()).isNotNull();
    assertThat(jmxLessCluster.getConfiguration().getMetricsOptions().isEnabled()).isTrue();
    assertThat(jmxLessCluster.getConfiguration().getMetricsOptions().isJMXReportingEnabled())
        .isFalse();
    // This lookup is expected to throw InstanceNotFoundException, completing the test.
    ObjectName connectedTo =
        ObjectName.getInstance(jmxLessCluster.getClusterName() + "-metrics:name=connected-to");
    server.getMBeanInfo(connectedTo);
  } finally {
    jmxLessCluster.close();
  }
}
}
@Test(groups = "short") public void should_log_timed_out_queries() throws Exception { // given error.setLevel(DEBUG); queryLogger = builder().build(); cluster.register(queryLogger); cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); String query = "SELECT foo FROM bar"; primingClient.prime( queryBuilder().withQuery(query).withThen(then().withFixedDelay(100L)).build()); // when try { session.execute(query); fail("Should have thrown OperationTimedOutException"); } catch (OperationTimedOutException e) { // ok } // then String line = errorAppender.waitAndGet(5000); assertThat(line) .contains("Query error") .contains(ip) .contains(Integer.toString(scassandra.getBinaryPort())) .contains(query) .contains("Timed out waiting for server response"); }
@Test(groups = "long") @CCMConfig(clusterProvider = "constant") public void constantReconnectionPolicyTest() throws Throwable { if (!(cluster().getConfiguration().getPolicies().getReconnectionPolicy() instanceof ConstantReconnectionPolicy)) { fail("Set policy does not match retrieved policy."); cluster().getConfiguration().getPolicies().getReconnectionPolicy(); assertTrue(reconnectionPolicy.getConstantDelayMs() == 10 * 1000);
@Test(groups = "short") public void simpleBatchTest() { try { PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?)"); BatchStatement batch = new BatchStatement(); batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); batch.add(st.bind("key1", 1)); batch.add(st.bind("key2", 0)); assertEquals(3, batch.size()); session().execute(batch); ResultSet rs = session().execute("SELECT * FROM test"); Row r; r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 0); r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 1); r = rs.one(); assertEquals(r.getString("k"), "key2"); assertEquals(r.getInt("v"), 0); assertTrue(rs.isExhausted()); } catch (UnsupportedFeatureException e) { // This is expected when testing the protocol v1 assertEquals( cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1); } }
@Test(groups = "short") public void should_not_hang_when_executing_sync_queries() { primingClient.prime( .build(); cluster.getConfiguration().getPoolingOptions().setPoolTimeoutMillis(500); try { Session session = cluster.connect(); try { session.execute("server_error query"); fail("Exception expected"); } catch (Exception e) { session.execute("this should not block indefinitely"); } catch (NoHostAvailableException nhae) { cluster.close();
@Test(groups = "short") public void simplePagingTest() { try { session().execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", key, i)); ResultSet rs = session().execute(st); assertFalse(rs.isFullyFetched()); assertFalse(rs.isExhausted()); assertEquals(rs.getAvailableWithoutFetching(), 5 - (i % 5)); assertEquals(rs.one().getInt(0), i); cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1);
@Test(groups = "short") public void batchTest() throws Exception { try { PreparedStatement ps1 = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)"); PreparedStatement ps2 = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, 'bar')"); BatchStatement bs = new BatchStatement(); bs.add(ps1.bind("one", "foo")); bs.add(ps2.bind("two")); bs.add( new SimpleStatement( "INSERT INTO " + SIMPLE_TABLE2 + " (k, v) VALUES ('three', 'foobar')")); session().execute(bs); List<Row> all = session().execute("SELECT * FROM " + SIMPLE_TABLE2).all(); assertEquals("three", all.get(0).getString("k")); assertEquals("foobar", all.get(0).getString("v")); assertEquals("one", all.get(1).getString("k")); assertEquals("foo", all.get(1).getString("v")); assertEquals("two", all.get(2).getString("k")); assertEquals("bar", all.get(2).getString("v")); } catch (UnsupportedFeatureException e) { // This is expected when testing the protocol v1 if (cluster().getConfiguration().getProtocolOptions().getProtocolVersion() != ProtocolVersion.V1) throw e; } }
@Test(groups = "short") public void should_log_all_parameter_types_bound_statements() throws Exception { // given normal.setLevel(TRACE); queryLogger = QueryLogger.builder().withMaxParameterValueLength(Integer.MAX_VALUE).build(); cluster().register(queryLogger); // when String query = "UPDATE test SET " + assignments + " WHERE pk = 42"; PreparedStatement ps = session().prepare(query); BoundStatement bs = ps.bind(values.toArray()); session().execute(bs); // then String line = normalAppender.waitAndGet(10000); assertThat(line).contains("Query completed normally").contains(ipOfNode(1)).contains(query); CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); for (DataType type : dataTypes) { TypeCodec<Object> codec = codecRegistry.codecFor(type); assertThat(line).contains(codec.format(getFixedValue(type))); } }
@Test(groups = "short") public void should_wait_until_all_executions_have_finished() { cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1000); long execStartCount = errors.getSpeculativeExecutions().getCount(); ResultSet rs = session.execute("mock query"); Row row = rs.one(); ExecutionInfo executionInfo = rs.getExecutionInfo(); assertThat(executionInfo.getTriedHosts()).containsOnly(host1, host2, host3); assertThat(executionInfo.getQueriedHost()).isEqualTo(host3);
@Test(groups = "short") public void should_throttle_requests() { .getConfiguration() .getPoolingOptions() .setMaxRequestsPerConnection(HostDistance.LOCAL, maxRequests); final Host host = cluster().getMetadata().getHost(ccm().addressOfNode(1)); ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); final Runnable openConnectionsWatcher = session().executeAsync("SELECT release_version FROM system.local");
@Test(groups = "short", dataProvider = "existingKeyspaceName") public void should_refresh_schema_and_token_map_if_schema_metadata_reenabled(String keyspace) throws Exception { try { schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); assertThat(schemaDisabledCluster.getMetadata().getKeyspace(keyspace)).isNotNull(); Token token1 = schemaDisabledCluster.getMetadata().newToken("0"); Token token2 = schemaDisabledCluster.getMetadata().newToken("111111"); assertThat(token1).isNotNull(); schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); verify(schemaDisabledControlConnection, after(1000).never()) .refreshSchema(null, null, null, null); schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(false);