// Reacts to runtime configuration changes so Cassandra consistency levels and
// shard settings can be updated without a restart.
@Override public void propertyChange( final PropertyChangeEvent evt ) {
    final String propName = evt.getPropertyName();
    if ( CassandraFig.ASTYANAX_READ_CL.equals( propName ) ) {
        // new value arrives as an Object; its string form must match an Astyanax
        // ConsistencyLevel constant or valueOf throws IllegalArgumentException
        readCl = ConsistencyLevel.valueOf( evt.getNewValue().toString() );
    }
    else if ( CassandraFig.ASTYANAX_WRITE_CL.equals( propName ) ) {
        writeCl = ConsistencyLevel.valueOf( evt.getNewValue().toString() );
    }
    else if (CassandraFig.SHARD_VALUES.equals(propName)){
        // NOTE(review): re-reads the shard values from config rather than using
        // the event payload — presumably they are equivalent; confirm.
        shardSettings = parseShardSettings( cassandraFig.getShardValues() );
    }
} } );
private CLevel() { db = org.apache.cassandra.db.ConsistencyLevel.valueOf(toString()); thrift = org.apache.cassandra.thrift.ConsistencyLevel.valueOf(toString()); astyanax = com.netflix.astyanax.model.ConsistencyLevel.valueOf("CL_" + toString()); }
@Override public Lock createLock(final UUID applicationId, final String... path ) { String lockPath = LockPathBuilder.buildPath( applicationId, path ); ConsistencyLevel consistencyLevel; try{ consistencyLevel = ConsistencyLevel.valueOf(cassandraFig.getLocksCl()); }catch(IllegalArgumentException e){ logger.warn( "Property {} value provided: {} is not valid", CassandraFig.LOCKS_CL, cassandraFig.getLocksCl() ); // just default it to local quorum if we can't parse consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM; } int lockExpiration; int lockConfigExpiration = cassandraFig.getLocksExpiration(); if( lockConfigExpiration >= MINIMUM_LOCK_EXPIRATION ){ lockExpiration = Math.min(cassandraFig.getLocksExpiration(), Integer.MAX_VALUE); }else{ logger.warn("Property {} is not valid. Choose a value greater than or equal to {}", CassandraFig.LOCKS_EXPIRATION, MINIMUM_LOCK_EXPIRATION); // use the default if seomthing below the minimum is provided lockExpiration = Integer.valueOf(CassandraFig.DEFAULT_LOCKS_EXPIRATION); } ColumnPrefixDistributedRowLock<String> lock = new ColumnPrefixDistributedRowLock<>(keyspace, getLocksColumnFamily(), lockPath) .expireLockAfter( lockExpiration, TimeUnit.MILLISECONDS) .withConsistencyLevel(consistencyLevel); return new AstyanaxLockImpl( lock ); }
private CLevel() { db = org.apache.cassandra.db.ConsistencyLevel.valueOf(toString()); thrift = org.apache.cassandra.thrift.ConsistencyLevel.valueOf(toString()); astyanax = com.netflix.astyanax.model.ConsistencyLevel.valueOf("CL_" + toString()); }
private CLevel() { db = org.apache.cassandra.db.ConsistencyLevel.valueOf(toString()); thrift = org.apache.cassandra.thrift.ConsistencyLevel.valueOf(toString()); astyanax = com.netflix.astyanax.model.ConsistencyLevel.valueOf("CL_" + toString()); }
// Discovery and pool type come from JMeter properties, with safe defaults when
// the property is absent ("NONE" discovery, "ROUND_ROBIN" pool).
configuration.setDiscoveryType(NodeDiscoveryType.valueOf(config.getProperty("astyanax.connection.discovery", "NONE")));
configuration.setConnectionPoolType(ConnectionPoolType.valueOf(config.getProperty("astyanax.connection.pool", "ROUND_ROBIN")));
// Read/write consistency strings must name Astyanax ConsistencyLevel constants
// exactly; valueOf throws IllegalArgumentException otherwise.
configuration.setDefaultReadConsistencyLevel(ConsistencyLevel.valueOf(com.netflix.jmeter.properties.Properties.instance.cassandra.getReadConsistency()));
configuration.setDefaultWriteConsistencyLevel(ConsistencyLevel.valueOf(com.netflix.jmeter.properties.Properties.instance.cassandra.getWriteConsistency()));
/**
 * Initialize the client: registers the cluster and keyspace, applies the
 * configured default read/write consistency levels, builds the column family
 * handle, and optionally upserts the schema.
 *
 * @param dataGenerator source of values for subsequent writes
 * @throws Exception if the Astyanax driver cannot connect or schema setup fails
 */
@Override
public void init(DataGenerator dataGenerator) throws Exception {
    try {
        this.dataGenerator = dataGenerator;
        this.cluster = cassA6XManager.registerCluster(config.getCluster(), config.getHost(), config.getHostPort());
        this.keyspace = cassA6XManager.registerKeyspace(config.getCluster(), config.getKeyspace(),
                config.getHost(), config.getHostPort());

        AstyanaxConfigurationImpl aci = (AstyanaxConfigurationImpl) this.keyspace.getConfig();
        aci.setDefaultReadConsistencyLevel(ConsistencyLevel.valueOf(config.getReadConsistencyLevel()))
           .setDefaultWriteConsistencyLevel(ConsistencyLevel.valueOf(config.getWriteConsistencyLevel()));

        CF = new ColumnFamily<>(config.getCfname(), StringSerializer.get(), IntegerSerializer.get(), StringSerializer.get());

        if (config.getCreateSchema()) {
            logger.info("Trying to upsert schema");
            preInit();
        }
    } catch (ConnectionException e) {
        // pass the exception to the logger so the stack trace is preserved
        // (the bare message previously dropped all cause detail from the log)
        logger.error("Failed to initialize Astyanax driver", e);
        throw e;
    }
    logger.info("Registered keyspace : " + this.keyspace.getKeyspaceName());
    logger.info("Initialized CassAstyanaxPlugin");
}
/**
 * Initialize the client: registers the cluster and keyspace, applies the
 * configured default read/write consistency levels, builds the column family
 * handle, and optionally upserts the schema.
 *
 * @param dataGenerator source of values for subsequent writes
 * @throws Exception if the Astyanax driver cannot connect or schema setup fails
 */
@Override
public void init(DataGenerator dataGenerator) throws Exception {
    try {
        this.dataGenerator = dataGenerator;
        this.cluster = cassA6XManager.registerCluster(config.getCluster(), config.getHost(), config.getHostPort());
        this.keyspace = cassA6XManager.registerKeyspace(config.getCluster(), config.getKeyspace(),
                config.getHost(), config.getHostPort());

        AstyanaxConfigurationImpl aci = (AstyanaxConfigurationImpl) this.keyspace.getConfig();
        aci.setDefaultReadConsistencyLevel(ConsistencyLevel.valueOf(config.getReadConsistencyLevel()))
           .setDefaultWriteConsistencyLevel(ConsistencyLevel.valueOf(config.getWriteConsistencyLevel()));

        CF = new ColumnFamily<>(config.getCfname(), StringSerializer.get(), IntegerSerializer.get(), StringSerializer.get());

        if (config.getCreateSchema()) {
            logger.info("Trying to upsert schema");
            preInit();
        }
    } catch (ConnectionException e) {
        // pass the exception to the logger so the stack trace is preserved
        // (the bare message previously dropped all cause detail from the log)
        logger.error("Failed to initialize Astyanax driver", e);
        throw e;
    }
    logger.info("Registered keyspace : " + this.keyspace.getKeyspaceName());
    logger.info("Initialized CassAstyanaxPlugin");
}
/**
 * Perform a single read operation.
 *
 * <p>Fetches the full row for {@code key} at the configured read consistency
 * level, returning a cache-miss marker for absent rows and failing when fewer
 * columns than expected come back.</p>
 *
 * @param key row key to read
 * @return {@code ResultOK} on success, {@code CacheMiss} when the row is empty
 * @throws Exception when the returned column count is below the configured size
 *                   or the read path fails
 */
@Override
public String readSingle(String key) throws Exception {
    final ConsistencyLevel readLevel = ConsistencyLevel.valueOf(config.getReadConsistencyLevel());

    final ColumnList<Integer> columns = keyspace.prepareQuery(this.CF)
            .setConsistencyLevel(readLevel)
            .getRow(key)
            .execute()
            .getResult();

    // guard clause: an absent row is a cache miss, not an error
    if (columns.isEmpty()) {
        return CacheMiss;
    }
    if (columns.size() < config.getColsPerRow()) {
        throw new Exception("Num Cols returned not ok " + columns.size());
    }
    return ResultOK;
}
/**
 * Perform a single write operation.
 *
 * <p>Writes one full row of generated column values under {@code key} in a
 * single mutation batch at the configured write consistency level.</p>
 *
 * @param key row key to write
 * @return {@code ResultOK} when the batch executes
 * @throws Exception when the write path fails
 */
@Override
public String writeSingle(String key) throws Exception {
    final ConsistencyLevel writeLevel = ConsistencyLevel.valueOf(config.getWriteConsistencyLevel());
    final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel(writeLevel);

    final ColumnListMutation<Integer> row = batch.withRow(this.CF, key);
    for (int col = 0; col < config.getColsPerRow(); col++) {
        row.putColumn(col, dataGenerator.getRandomValue());
    }

    batch.execute();
    return ResultOK;
}
/**
 * Perform a single read operation.
 *
 * <p>Reads the row for {@code key} using the configured read consistency level.
 * An empty row maps to a cache miss; a short row (fewer columns than configured)
 * is treated as a failure.</p>
 *
 * @param key row key to read
 * @return {@code ResultOK} on success, {@code CacheMiss} when the row is absent
 * @throws Exception when too few columns return or the read path fails
 */
@Override
public String readSingle(String key) throws Exception {
    final ConsistencyLevel readLevel = ConsistencyLevel.valueOf(config.getReadConsistencyLevel());

    final ColumnList<Integer> columns = keyspace.prepareQuery(this.CF)
            .setConsistencyLevel(readLevel)
            .getRow(key)
            .execute()
            .getResult();

    // empty row → miss; short row → error; full row → success
    if (columns.isEmpty()) {
        return CacheMiss;
    }
    if (columns.size() < config.getColsPerRow()) {
        throw new Exception("Num Cols returned not ok " + columns.size());
    }
    return ResultOK;
}
/**
 * Perform a single write operation.
 *
 * <p>Batches one generated value per column index for the given row key and
 * executes the mutation at the configured write consistency level.</p>
 *
 * @param key row key to write
 * @return {@code ResultOK} when the mutation executes
 * @throws Exception when the write path fails
 */
@Override
public String writeSingle(String key) throws Exception {
    final ConsistencyLevel writeLevel = ConsistencyLevel.valueOf(config.getWriteConsistencyLevel());
    final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel(writeLevel);

    final ColumnListMutation<Integer> row = batch.withRow(this.CF, key);
    for (int col = 0; col < config.getColsPerRow(); col++) {
        row.putColumn(col, dataGenerator.getRandomValue());
    }

    batch.execute();
    return ResultOK;
}
// NOTE(review): fragment of an Astyanax context builder chain — the expression
// this continues begins above this excerpt.
.setDiscoveryType(NodeDiscoveryType.RING_DESCRIBE)
.setConnectionPoolType(ConnectionPoolType.TOKEN_AWARE)
// CMB properties store bare level names (e.g. "QUORUM") while the Astyanax
// enum constants carry a "CL_" prefix, hence the concatenation before valueOf
.setDefaultReadConsistencyLevel(ConsistencyLevel.valueOf("CL_"+CMBProperties.getInstance().getReadConsistencyLevel()))
.setDefaultWriteConsistencyLevel(ConsistencyLevel.valueOf("CL_"+CMBProperties.getInstance().getWriteConsistencyLevel())))
.withConnectionPoolConfiguration(connectionPoolConfiguration)
.withConnectionPoolMonitor(new CountingConnectionPoolMonitor())