@Override
public void recordTransferLog(
        String queueName, String source, String dest, UUID messageId) {

    Statement insert = QueryBuilder.insertInto(TABLE_TRANSFER_LOG)
            .value(COLUMN_QUEUE_NAME, queueName)
            .value(COLUMN_SOURCE_REGION, source)
            .value(COLUMN_DEST_REGION, dest)
            .value(COLUMN_MESSAGE_ID, messageId)
            .value(COLUMN_TRANSFER_TIME, System.currentTimeMillis());

    cassandraClient.getApplicationSession().execute(insert);

    // logger.debug("Recorded transfer log for queue {} dest {} messageId {}",
    //         queueName, dest, messageId);
}
public CassandraPageSink(
        CassandraSession cassandraSession,
        String schemaName,
        String tableName,
        List<String> columnNames,
        List<Type> columnTypes,
        boolean generateUUID) {
    this.cassandraSession = requireNonNull(cassandraSession, "cassandraSession is null");
    requireNonNull(schemaName, "schemaName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(columnNames, "columnNames is null");
    this.columnTypes = ImmutableList.copyOf(requireNonNull(columnTypes, "columnTypes is null"));
    this.generateUUID = generateUUID;

    Insert insert = insertInto(schemaName, tableName);
    if (generateUUID) {
        insert.value("id", bindMarker());
    }
    for (int i = 0; i < columnNames.size(); i++) {
        String columnName = columnNames.get(i);
        // Guava's checkArgument message template only supports %s placeholders
        checkArgument(columnName != null, "columnName is null at position: %s", i);
        insert.value(columnName, bindMarker());
    }
    this.insert = cassandraSession.prepare(insert);
}
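A minimal sketch of how the prepared insert above might be bound per row. The session, and the hypothetical rowValues list (one value per column, in column order), are assumptions; when generateUUID is true, the generated UUID must fill the "id" marker that was added first:

// Sketch only: "rowValues" is a hypothetical per-row value list.
List<Object> values = new ArrayList<>();
if (generateUUID) {
    values.add(UUID.randomUUID()); // fills the "id" bind marker, which was added first
}
values.addAll(rowValues);
cassandraSession.execute(insert.bind(values.toArray()));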
@Override
public void recordAuditLog(
        AuditLog.Action action,
        AuditLog.Status status,
        String queueName,
        String region,
        UUID messageId,
        UUID queueMessageId) {

    Statement insert = QueryBuilder.insertInto(TABLE_AUDIT_LOG)
            .value(COLUMN_ACTION, action.toString())
            .value(COLUMN_STATUS, status.toString())
            .value(COLUMN_QUEUE_NAME, queueName)
            .value(COLUMN_REGION, region)
            .value(COLUMN_MESSAGE_ID, messageId)
            .value(COLUMN_QUEUE_MESSAGE_ID, queueMessageId)
            .value(COLUMN_TRANSFER_TIME, System.currentTimeMillis());

    cassandraClient.getApplicationSession().execute(insert);
}
/**
 * @return CQL query statement to insert a new task into the "workflows" table
 */
public String getInsertTaskStatement() {
    return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
            .value(WORKFLOW_ID_KEY, bindMarker())
            .value(SHARD_ID_KEY, bindMarker())
            .value(TASK_ID_KEY, bindMarker())
            .value(ENTITY_KEY, ENTITY_TYPE_TASK)
            .value(PAYLOAD_KEY, bindMarker())
            .getQueryString();
}
@Override
public void writeMessageData(final UUID messageId, final DatabaseQueueMessageBody messageBody) {
    Preconditions.checkArgument(QakkaUtils.isTimeUuid(messageId), "MessageId is not a type 1 UUID");
    logger.trace("writeMessageData {}", messageId);

    Statement insert = QueryBuilder.insertInto(TABLE_MESSAGE_DATA)
            .value(COLUMN_MESSAGE_ID, messageId)
            .value(COLUMN_MESSAGE_DATA, messageBody.getBlob())
            .value(COLUMN_CONTENT_TYPE, messageBody.getContentType())
            .using(QueryBuilder.ttl(maxTtl));

    cassandraClient.getApplicationSession().execute(insert);
}
/**
 * @return CQL query statement to insert a new workflow into the "workflows" table
 */
public String getInsertWorkflowStatement() {
    return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
            .value(WORKFLOW_ID_KEY, bindMarker())
            .value(SHARD_ID_KEY, bindMarker())
            .value(TASK_ID_KEY, bindMarker())
            .value(ENTITY_KEY, ENTITY_TYPE_WORKFLOW)
            .value(PAYLOAD_KEY, bindMarker())
            .value(TOTAL_TASKS_KEY, bindMarker())
            .value(TOTAL_PARTITIONS_KEY, bindMarker())
            .getQueryString();
}
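The two builders above return CQL strings with positional bind markers rather than executing anything. A minimal sketch of preparing and binding the workflow insert, assuming a DataStax 3.x Session named session; all bound values are hypothetical:

// Sketch only: "session" and every bound value are hypothetical.
PreparedStatement prepared = session.prepare(getInsertWorkflowStatement());
BoundStatement bound = prepared.bind(
        "workflow-123", // WORKFLOW_ID_KEY
        1,              // SHARD_ID_KEY
        "task-0",       // TASK_ID_KEY
        "{}",           // PAYLOAD_KEY
        10,             // TOTAL_TASKS_KEY
        2);             // TOTAL_PARTITIONS_KEY
session.execute(bound);

Note that ENTITY_KEY takes no marker: it is baked into the prepared statement as the constant ENTITY_TYPE_WORKFLOW, so only six values are bound.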
public void createShard(final Shard shard) {
    Statement insert = QueryBuilder.insertInto(getTableName(shard.getType()))
            .value(COLUMN_QUEUE_NAME, shard.getQueueName())
            .value(COLUMN_REGION, shard.getRegion())
            .value(COLUMN_SHARD_ID, shard.getShardId())
            .value(COLUMN_ACTIVE, 1)
            .value(COLUMN_POINTER, shard.getPointer());

    cassandraClient.getQueueMessageSession().execute(insert);
}
@Override
public void writeQueue(DatabaseQueue queue) {
    logger.trace("writeQueue {}", queue.getName());

    Statement insert = QueryBuilder.insertInto(TABLE_QUEUES)
            .value(COLUMN_QUEUE_NAME, queue.getName())
            .value(COLUMN_REGIONS, queue.getRegions())
            .value(COLUMN_DEFAULT_DESTINATIONS, queue.getDefaultDestinations())
            .value(COLUMN_DEFAULT_DELAY_MS, queue.getDefaultDelayMs())
            .value(COLUMN_RETRY_COUNT, queue.getRetryCount())
            .value(COLUMN_HANDLING_TIMEOUT_SEC, queue.getHandlingTimeoutSec())
            .value(COLUMN_DEAD_LETTER_QUEUE, queue.getDeadLetterQueue());

    cassandraClient.getApplicationSession().execute(insert);
}
private static String insertSQL(final String table, final int timeout) {
    Insert insertInto = insertInto(table)
            .value(ID, raw("?"))
            .value(CREATED_AT, raw("?"))
            .value(ACCESSED_AT, raw("?"))
            .value(SAVED_AT, raw("?"))
            .value(ATTRIBUTES, raw("?"));
    if (timeout > 0) {
        insertInto.using(ttl(timeout));
    }
    return insertInto.getQueryString();
}
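A hedged sketch of consuming the generated CQL, assuming a DataStax Session named session; the table name, column types, and bound values here are illustrative assumptions, not the original caller:

// Sketch only: table name, value types, and bound values are assumptions.
PreparedStatement ps = session.prepare(insertSQL("sessions", 3600));
session.execute(ps.bind(
        "session-id-1",                // ID
        new Date(),                    // CREATED_AT
        new Date(),                    // ACCESSED_AT
        new Date(),                    // SAVED_AT
        ByteBuffer.wrap(new byte[0]))); // ATTRIBUTES (empty blob for illustration)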
@Override
public void putLong(final MapScope scope, final String key, final Long value) {
    Preconditions.checkNotNull(scope, "mapscope is required");
    Preconditions.checkNotNull(key, "key is required");
    Preconditions.checkNotNull(value, "value is required");

    Statement mapEntry = QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
            .value("key", getMapEntryPartitionKey(scope, key))
            .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.bigint().serialize(value, ProtocolVersion.NEWEST_SUPPORTED));
    session.execute(mapEntry);

    final int bucket = BUCKET_LOCATOR.getCurrentBucket(scope.getName());
    Statement mapKey = QueryBuilder.insertInto(MAP_KEYS_TABLE)
            .value("key", getMapKeyPartitionKey(scope, bucket))
            .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED));
    session.execute(mapKey);
}
public static void insertIntoTableMultiPartitionClusteringKeys(CassandraSession session, SchemaTableName table) {
    for (int rowNumber = 1; rowNumber < 10; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("partition_one", "partition_one_" + rowNumber)
                .value("partition_two", "partition_two_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), 9);
}
public static void insertIntoTableClusteringKeys(CassandraSession session, SchemaTableName table, int rowsCount) {
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
@Override
public void putUuid(final MapScope scope, final String key, final UUID putUuid) {
    Preconditions.checkNotNull(scope, "mapscope is required");
    Preconditions.checkNotNull(key, "key is required");
    Preconditions.checkNotNull(putUuid, "value is required");

    final BatchStatement batchStatement = new BatchStatement();

    batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
            .value("key", getMapEntryPartitionKey(scope, key))
            .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.uuid().serialize(putUuid, ProtocolVersion.NEWEST_SUPPORTED)));

    final int bucket = BUCKET_LOCATOR.getCurrentBucket(scope.getName());
    batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
            .value("key", getMapKeyPartitionKey(scope, bucket))
            .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.serializeValue(null, ProtocolVersion.NEWEST_SUPPORTED)));

    session.execute(batchStatement);
}
public static void insertIntoTableClusteringKeysInequality(CassandraSession session, SchemaTableName table, Date date, int rowsCount) {
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_1")
                .value("clust_one", "clust_one")
                .value("clust_two", rowNumber)
                .value("clust_three", date.getTime() + rowNumber * 10);
        session.execute(insert);
    }
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
// Excerpt: in the original code both inserts are passed to a surrounding call
// (shown here as batchStatement.add(...), an assumption based on the trailing parentheses).
batchStatement.add(QueryBuilder.insertInto(TOKENS_TABLE)
        .value("key", DataType.serializeValue(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED))
        .value("column1", DataType.serializeValue(key, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", valueBuffer)
        .using(usingTTL));

batchStatement.add(QueryBuilder.insertInto(PRINCIPAL_TOKENS_TABLE)
        .value("key", principalKeyBuffer)
        .value("column1", DataType.serializeValue(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED))
        /* value column and TTL elided in this excerpt */);
bb.flip();

final Statement cacheEntry = QueryBuilder.insertInto(SCOPED_CACHE_TABLE)
        .using(timeToLive)
        .value("key", getPartitionKey(scope, rowKeyString, bucket))
        .value("column1", DataType.text().serialize(columnName, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", bb);
private static void insertTestData(CassandraSession session, SchemaTableName table, Date date, int rowsCount) {
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key " + rowNumber)
                .value("typeuuid", UUID.fromString(String.format("00000000-0000-0000-0000-%012d", rowNumber)))
                .value("typeinteger", rowNumber)
                .value("typelong", rowNumber + 1000L)
                .value("typebytes", ByteBuffer.wrap(Ints.toByteArray(rowNumber)).asReadOnlyBuffer())
                .value("typetimestamp", date)
                .value("typeansi", "ansi " + rowNumber)
                .value("typeboolean", rowNumber % 2 == 0)
                .value("typedecimal", new BigDecimal(Math.pow(2, rowNumber)))
                .value("typedouble", Math.pow(4, rowNumber))
                .value("typefloat", (float) Math.pow(8, rowNumber))
                .value("typeinet", InetAddresses.forString("127.0.0.1"))
                .value("typevarchar", "varchar " + rowNumber)
                .value("typevarint", BigInteger.TEN.pow(rowNumber))
                .value("typetimeuuid", UUID.fromString(String.format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber)))
                .value("typelist", ImmutableList.of("list-value-1" + rowNumber, "list-value-2" + rowNumber))
                .value("typemap", ImmutableMap.of(rowNumber, rowNumber + 1L, rowNumber + 2, rowNumber + 3L))
                .value("typeset", ImmutableSet.of(false, true));
        session.execute(insert);
    }
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
Insert insertStmt = QueryBuilder.insertInto(table);
insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker());
insertStmt.value(field, QueryBuilder.bindMarker());
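A minimal sketch of preparing and binding the YCSB-style insert above, assuming a DataStax Session named session; the key and field value are hypothetical:

// Sketch only: "session", the key, and the field value are hypothetical.
PreparedStatement ps = session.prepare(insertStmt);
session.execute(ps.bind(
        "user-1",                                                     // YCSB_KEY
        ByteBuffer.wrap("value".getBytes(StandardCharsets.UTF_8))));  // field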
// Excerpt: the opening "if" is cut off in the original snippet; the TTL condition
// and the truncated else branch are reconstructed here to mirror the TTL branch
// (an assumption based on context).
if (ttl > 0) {
    Using timeToLive = QueryBuilder.ttl(ttl);

    batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
            .using(timeToLive)
            .value("key", getMapEntryPartitionKey(scope, key))
            .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.text().serialize(value, ProtocolVersion.NEWEST_SUPPORTED)));

    batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
            .using(timeToLive)
            .value("key", getMapKeyPartitionKey(scope, bucket))
            .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED)));
}
else {
    batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
            .value("key", getMapEntryPartitionKey(scope, key))
            .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.text().serialize(value, ProtocolVersion.NEWEST_SUPPORTED)));

    batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
            .value("key", getMapKeyPartitionKey(scope, bucket))
            .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
            .value("value", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED)));
}
// Excerpt: in the original code the two uniqueValueStatement variants live in
// separate branches (with and without TTL); the truncated chains below are
// completed by mirroring the first variant (an assumption based on context).
Statement uniqueValueStatement = QueryBuilder.insertInto(TABLE_UNIQUE_VALUES)
        .value("key", partitionKey)
        .value("column1", serializeUniqueValueColumn(ev))
        .value("value", DataType.serializeValue(COL_VALUE, ProtocolVersion.NEWEST_SUPPORTED))
        .using(ttl);

// Variant without TTL:
Statement uniqueValueStatement = QueryBuilder.insertInto(TABLE_UNIQUE_VALUES)
        .value("key", partitionKey)
        .value("column1", serializeUniqueValueColumn(ev))
        .value("value", DataType.serializeValue(COL_VALUE, ProtocolVersion.NEWEST_SUPPORTED));

Statement uniqueValueLogStatement = QueryBuilder.insertInto(TABLE_UNIQUE_VALUES_LOG)
        .value("key", logPartitionKey)
        .value("column1", serializeUniqueValueLogColumn(uniqueFieldEntry))
        .value("value", DataType.serializeValue(COL_VALUE, ProtocolVersion.NEWEST_SUPPORTED));
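A hedged sketch of executing the unique-value and log inserts together, assuming a DataStax Session named session; grouping them in a logged batch is an illustrative assumption, not necessarily how the original code runs them:

// Sketch only: assumes a com.datastax.driver.core.Session named "session";
// a logged batch keeps the value row and its log row atomic.
BatchStatement batch = new BatchStatement(BatchStatement.Type.LOGGED);
batch.add(uniqueValueStatement);
batch.add(uniqueValueLogStatement);
session.execute(batch);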