private T insertLocked(T entity) {
  Insert insert = insertInto(getColumnFamilyName()).ifNotExists();
  CassandraEntityMapper<T> entityMapper =
      CassandraEntityMapper.getEntityMapperForClass(getColumnFamilyClass(), cassandraClient);
  // Bind key and non-key columns alike; values come from the entity mapper.
  for (String name : entityMapper.getKeyColumnNames()) {
    insert.value(name, entityMapper.getColumnValueForName(name, entity, cassandraClient));
  }
  for (String name : entityMapper.getNonKeyColumnNames()) {
    insert.value(name, entityMapper.getColumnValueForName(name, entity, cassandraClient));
  }
  insert.setConsistencyLevel(getWriteConsistencyLevel());
  ResultSet res = execute(insert);
  if (!res.wasApplied()) {
    // The lightweight transaction lost: a row with this key already exists.
    LOG.error("[{}] Can't insert entity. Entity already exists!", getColumnFamilyClass());
    throw new KaaOptimisticLockingFailureException("Can't insert entity. Entity already exists!");
  } else {
    // Read the row back so the caller gets the stored state.
    Clause[] whereClauses = buildKeyClauses(entityMapper, entity);
    Select.Where where = select().from(getColumnFamilyName()).where(whereClauses[0]);
    for (int i = 1; i < whereClauses.length; i++) {
      where = where.and(whereClauses[i]);
    }
    return findOneByStatement(where);
  }
}
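The method above hinges on a Cassandra lightweight transaction: ifNotExists() turns the insert into a compare-and-set, and wasApplied() reports whether the write won. A minimal standalone sketch of that pattern, assuming a hypothetical "users" table and an existing Session:

import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import java.util.UUID;

class LwtInsertSketch {
  // Inserts only if no row with this primary key exists; returns whether the write won.
  static boolean insertIfAbsent(Session session, UUID id, String name) {
    ResultSet rs = session.execute(
        insertInto("users")                                // hypothetical table
            .value("id", id)
            .value("name", name)
            .ifNotExists()                                 // lightweight transaction (compare-and-set)
            .setConsistencyLevel(ConsistencyLevel.QUORUM));
    return rs.wasApplied();                                // false => a row already existed
  }
}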
private void testLargeText(int key) throws Throwable {
  // Write data
  StringBuilder b = new StringBuilder();
  for (int i = 0; i < 1000000; ++i) { // Create ultra-long text
    b.append(i);
  }
  session()
      .execute(
          insertInto("large_text")
              .value("k", key)
              .value("txt", b.toString())
              .setConsistencyLevel(ConsistencyLevel.QUORUM));
  // Read data
  Row row = session().execute(select().all().from("large_text").where(eq("k", key))).one();
  // Verify data
  assertEquals(row.getString("txt"), b.toString());
}
private void testByteRows(int key) throws Throwable {
  // Build small ByteBuffer sample
  ByteBuffer bb = ByteBuffer.allocate(58);
  bb.putShort((short) 0xCAFE);
  bb.flip();
  // Write data
  for (int i = 0; i < 1000000; ++i) {
    session()
        .execute(
            insertInto("wide_byte_rows")
                .value("k", key)
                .value("i", bb)
                .setConsistencyLevel(ConsistencyLevel.QUORUM));
  }
  // Read data
  ResultSet rs = session().execute(select("i").from("wide_byte_rows").where(eq("k", key)));
  // Verify data
  for (Row row : rs) {
    assertEquals(row.getBytes("i"), bb);
  }
}
@Override
public InsertBuilder<T> withConsistencyLevel(ConsistencyLevel consistency) {
  insert.setConsistencyLevel(consistency);
  return this;
}
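For context, a minimal sketch of the fluent-builder shape this override belongs to; SimpleInsertBuilder and its execute() method are illustrative names, not the actual class behind the snippet:

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Insert;

class SimpleInsertBuilder<T> {
  private final Session session;
  private final Insert insert;

  SimpleInsertBuilder(Session session, Insert insert) {
    this.session = session;
    this.insert = insert;
  }

  SimpleInsertBuilder<T> withConsistencyLevel(ConsistencyLevel consistency) {
    insert.setConsistencyLevel(consistency);  // mutates the statement, keeps the chain fluent
    return this;
  }

  ResultSet execute() {
    return session.execute(insert);
  }
}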
private void testWideRows(int key) throws Throwable {
  // Write data
  for (int i = 0; i < 1000000; ++i) {
    session()
        .execute(
            insertInto("wide_rows")
                .value("k", key)
                .value("i", i)
                .setConsistencyLevel(ConsistencyLevel.QUORUM));
  }
  // Read data
  ResultSet rs = session().execute(select("i").from("wide_rows").where(eq("k", key)));
  // Verify data
  int i = 0;
  for (Row row : rs) {
    assertEquals(row.getInt("i"), i++);
  }
}
private void testWideTable(int key) throws Throwable {
  // Write data
  Insert insertStatement = insertInto("wide_table").value("k", key);
  for (int i = 0; i < 330; ++i) {
    insertStatement = insertStatement.value(createColumnName(i), i);
  }
  session().execute(insertStatement.setConsistencyLevel(ConsistencyLevel.QUORUM));
  // Read data
  Row row = session().execute(select().all().from("wide_table").where(eq("k", key))).one();
  // Verify data
  for (int i = 0; i < 330; ++i) {
    assertEquals(row.getInt(createColumnName(i)), i);
  }
}
private void insertBlockedDeltas(BatchStatement batchStatement, BlockedDeltaTableDDL tableDDL,
                                 ConsistencyLevel consistencyLevel, ByteBuffer rowKey,
                                 UUID changeId, ByteBuffer encodedDelta) {
  List<ByteBuffer> blocks = _daoUtils.getDeltaBlocks(encodedDelta);
  if (blocks.size() > 1) {
    _blockedRowsMigratedMeter.mark();
  }
  // One insert per block, keyed by the block index, so the delta can be
  // reassembled in order on read.
  for (int i = 0; i < blocks.size(); i++) {
    batchStatement.add(QueryBuilder.insertInto(tableDDL.getTableMetadata())
        .value(tableDDL.getRowKeyColumnName(), rowKey)
        .value(tableDDL.getChangeIdColumnName(), changeId)
        .value(tableDDL.getBlockColumnName(), i)
        .value(tableDDL.getValueColumnName(), blocks.get(i))
        .setConsistencyLevel(consistencyLevel));
  }
}
public <T> Insert createStatment(T bean, ConsistencyLevel consistency) {
  ClassInformation classInformation = ClassInformations.INSTACE.getClass(bean.getClass());
  // Fail fast if the bean's key columns are unset before building the insert.
  isKeyNull(bean, classInformation);
  KeySpaceInformation key = classInformation.getKeySpace(keySpace);
  Insert insert = QueryBuilder.insertInto(key.getKeySpace(), key.getColumnFamily());
  insert = createInsert(bean, insert, classInformation);
  insert.setConsistencyLevel(consistency);
  return insert;
}
/**
 * Updates the current database version to the migration's version. Executed after a
 * migration has succeeded.
 *
 * @param migration the migration that updated the database version
 * @return whether the version update was written
 */
public boolean updateVersion(final Migration migration) {
  final Statement insert = QueryBuilder.insertInto(SCHEMA_VERSION_CF)
      .value(TYPE, migration.getType().name())
      .value(VERSION, migration.getVersion())
      .value(TIMESTAMP, System.currentTimeMillis())
      .value(DESCRIPTION, migration.getDescription())
      .setConsistencyLevel(ConsistencyLevel.ALL);
  try {
    session.execute(insert);
    return true;
  } catch (final Exception e) {
    LOGGER.error("Failed to execute update version statement", e);
    return false;
  }
}
    .value(deltaTableDDL.getChangeIdColumnName(), compactionKey)
    .value(deltaTableDDL.getValueColumnName(), encodedCompaction)
    .setConsistencyLevel(consistencyLevel);
oldTableFuture = session.executeAsync(oldTableStatement);
public void write(Object[] args) {
  PersonRegister person = PersonRegister.getInstance(args);
  Statement query = QueryBuilder.insertInto("PersonRegister")
      .value("MSDIN", person.getMsdin())
      .value("firstName", person.getFirstName())
      .value("lastName", person.getLastName())
      .value("birthDate", person.getBirthDate())
      .value("gender", person.getGender())
      .setConsistencyLevel(ConsistencyLevel.QUORUM);
  batchStatement.add(query);
  numStatement++;
  // Flush the batch once it reaches the configured size.
  if (numStatement == NUM_BATCH_STATEMENT) {
    LOGGER.info("Execute batch query PersonRegister: " + numIteration++);
    session.execute(batchStatement);
    batchStatement.clear();
    numStatement = 0;
  }
}
.value("jvm_md_swap_free", processMap(analyse.jvmMemoryDetails(), MemoryDetails::swapFree)) .value("ext", analyse.ext()) .setConsistencyLevel(ConsistencyLevel.ALL); connector.session().execute(insert); return newId.toString();
/**
 * Applies write options (TTL, timestamp, consistency level, retry policy) to an insert.
 *
 * @param options write options to apply; may be null
 * @param insert  the insert statement to decorate
 * @param emeta   entity metadata consulted when resolving the TTL
 */
public static void applyOptions(WriteOptions options, Insert insert, EntityTypeMetadata emeta) {
  int ttl = getTtl(options, emeta);
  if (ttl > -1) {
    insert.using(ttl(ttl));
  }
  if (options != null) {
    if (options.getTimestamp() != -1) {
      insert.using(timestamp(options.getTimestamp()));
    }
    if (options.getConsistencyLevel() != null) {
      insert.setConsistencyLevel(options.getConsistencyLevel());
    }
    if (options.getRetryPolicy() != null) {
      insert.setRetryPolicy(options.getRetryPolicy());
    }
  }
}
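The same knobs applyOptions wires up can also be set directly with plain driver calls. A sketch, with the table, column, and literal values as placeholders:

import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp;
import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.policies.DefaultRetryPolicy;
import com.datastax.driver.core.querybuilder.Insert;

class WriteOptionsSketch {
  // Builds an insert decorated with TTL, write timestamp, consistency, and retry policy.
  static Insert decoratedInsert(Object id, long writeTimeMicros) {
    Insert insert = insertInto("events").value("id", id);   // placeholder table/column
    insert.using(ttl(3600));                                // expire after one hour
    insert.using(timestamp(writeTimeMicros));               // explicit write timestamp (microseconds)
    insert.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
    insert.setRetryPolicy(DefaultRetryPolicy.INSTANCE);
    return insert;
  }
}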
public void write(Object[] args) {
  Register register = Register.getInstance(args);
  Statement query = QueryBuilder.insertInto("Register")
      .value("id", register.getId())
      .value("MSDIN", register.getMsdin())
      .value("firstName", register.getFirstName())
      .value("lastName", register.getLastName())
      .value("birthDate", register.getBirthDate())
      .value("gender", register.getGender())
      .value("codPacote", register.getCodPacote())
      .value("dateTime", register.getDateTime())
      .value("value", register.getValue())
      .value("type", register.getType())
      .value("country", register.getCountry())
      .setConsistencyLevel(ConsistencyLevel.QUORUM);
  batchStatement.add(query);
  numStatement++;
  // Flush the batch once it reaches the configured size.
  if (numStatement == NUM_BATCH_STATEMENT) {
    LOGGER.info("Execute batch query Register: " + numIteration++);
    session.execute(batchStatement);
    batchStatement.clear();
    numStatement = 0;
  }
}
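Both write(...) methods above follow the same size-bounded batching pattern: accumulate statements, then flush and reset once the batch reaches a fixed size. A self-contained sketch of just that mechanism; the threshold of 100 is an arbitrary placeholder:

import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;

class BatchingWriter {
  private static final int NUM_BATCH_STATEMENT = 100;  // placeholder flush threshold
  private final Session session;
  private final BatchStatement batch = new BatchStatement();
  private int numStatement = 0;

  BatchingWriter(Session session) {
    this.session = session;
  }

  void add(Statement statement) {
    batch.add(statement);
    if (++numStatement == NUM_BATCH_STATEMENT) {
      session.execute(batch);  // flush the accumulated writes
      batch.clear();
      numStatement = 0;
    }
  }
}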
protected RegularStatement addStatement(GCEvent event) {
  return (RegularStatement) QueryBuilder.insertInto(TABLE_NAME)
      .value("id", event.id() != null ? UUID.fromString(event.id()) : uuid())
      .value("parent_id", event.parentEvent().map(UUID::fromString).orElse(null))
      .value("analyse_id", UUID.fromString(event.analyseId()))
      .value("bucket_id", event.bucketId())
      .value("date", event.occurred().toString(DATE_PATTERN))
      .value("jvm_id", event.jvmId())
      .value("description", event.description())
      .value("tmstm", event.timestamp())
      .value("written_at", UUIDGen.getTimeUUID(event.occurred().getMillis()))
      .value("occurred", event.occurred().getMillis())
      .value("cause", event.cause().type())
      .value("properties", event.properties())
      .value("vm_event_type", event.vmEventType().type())
      .value("capacity", Arrays.asList(event.capacity().usedBefore(),
          event.capacity().usedAfter(), event.capacity().total()))
      .value("total_capacity", Arrays.asList(event.totalCapacity().usedBefore(),
          event.totalCapacity().usedAfter(), event.totalCapacity().total()))
      .value("pause_mu", event.pauseMu())
      .value("user_time", event.user())
      .value("sys_time", event.sys())
      .value("real_time", event.real())
      .value("phase", event.phase().type())
      .value("generations", EnumSetUtils.encode(event.generations()))
      .value("concurrency", event.concurrency().type())
      .value("gen_cap_before", processKeyMap(event.capacityByGeneration(), Generation::type, Capacity::usedBefore))
      .value("gen_cap_after", processKeyMap(event.capacityByGeneration(), Generation::type, Capacity::usedAfter))
      .value("gen_cap_total", processKeyMap(event.capacityByGeneration(), Generation::type, Capacity::total))
      .value("ext", event.ext())
      .setConsistencyLevel(ConsistencyLevel.ONE);
}