/**
 * Returns true if the buffer checkpointed under {@code checkpointId} by the given
 * subtask has already been committed to the external (Cassandra) table.
 *
 * @param subtaskIdx   index of the subtask whose commit progress is queried
 * @param checkpointId checkpoint id to test for committed-ness
 * @return true if {@code checkpointId} is at or below the subtask's last committed id
 */
@Override public boolean isCheckpointCommitted(int subtaskIdx, long checkpointId) {
    // Pending checkpointed buffers are committed in ascending order of their
    // checkpoint id. This way we can tell if a checkpointed buffer was committed
    // just by asking the third-party storage system for the last checkpoint id
    // committed by the specified subtask.
    Long lastCommittedCheckpoint = lastCommittedCheckpoints.get(subtaskIdx);
    if (lastCommittedCheckpoint == null) {
        // Cache miss: fetch the last committed id for this subtask from Cassandra.
        String statement = String.format(
            "SELECT checkpoint_id FROM %s.%s where sink_id='%s' and sub_id=%d;",
            keySpace, table, operatorId, subtaskIdx);
        Iterator<Row> resultIt = session.execute(statement).iterator();
        if (resultIt.hasNext()) {
            // Populate the local cache so subsequent calls avoid the round trip.
            // NOTE(review): a missing row is NOT cached, so absent subtasks re-query
            // every call — presumably intentional, since the row appears after the
            // first commit; confirm against the committer's write path.
            lastCommittedCheckpoint = resultIt.next().getLong("checkpoint_id");
            lastCommittedCheckpoints.put(subtaskIdx, lastCommittedCheckpoint);
        }
    }
    // Committed iff we found a committed id and the queried id does not exceed it.
    return lastCommittedCheckpoint != null && checkpointId <= lastCommittedCheckpoint;
}
}
private Iterator<DatabaseQueueMessage> getIteratorFromRows(List<Row> rows){ List<DatabaseQueueMessage> messages = new ArrayList<>(rows.size()); rows.forEach(row -> { final String queueName = row.getString( COLUMN_QUEUE_NAME); final String region = row.getString( COLUMN_REGION); final long shardId = row.getLong( COLUMN_SHARD_ID); final UUID queueMessageId = row.getUUID( COLUMN_QUEUE_MESSAGE_ID); final UUID messageId = row.getUUID( COLUMN_MESSAGE_ID); final long queuedAt = row.getLong( COLUMN_QUEUED_AT); final long inflightAt = row.getLong( COLUMN_INFLIGHT_AT); messages.add(new DatabaseQueueMessage( messageId, messageType, queueName, region, shardId, queuedAt, inflightAt, queueMessageId)); //queueMessageId is internal to the messages_available and messages_inflight tables nextStart = queueMessageId; }); return messages.iterator(); }
/**
 * Converts Cassandra result rows into {@code Shard} objects and returns an iterator
 * over them. As a side effect, stores the shard id of the last row in
 * {@code nextStart} so subsequent paging can resume after it.
 *
 * @param rows rows read from the shard table
 * @return iterator over the converted shards, in row order
 */
private Iterator<Shard> getIteratorFromRows( List<Row> rows){
    final List<Shard> result = new ArrayList<>(rows.size());
    for (Row row : rows) {
        final long shardId = row.getLong( ShardSerializationImpl.COLUMN_SHARD_ID );
        result.add(new Shard(
            row.getString( ShardSerializationImpl.COLUMN_QUEUE_NAME ),
            row.getString( ShardSerializationImpl.COLUMN_REGION ),
            shardType,
            shardId,
            row.getUUID( ShardSerializationImpl.COLUMN_POINTER )));
        // Remember the most recent shard id as the paging cursor.
        nextStart = shardId;
    }
    return result.iterator();
}
/**
 * Reads column {@code i} of the current row as a Java {@code long}, widening or
 * reinterpreting the underlying Cassandra type as needed.
 *
 * @param i zero-based column index
 * @return the column value as a long
 * @throws IllegalStateException if the column's Cassandra type has no long mapping
 */
@Override public long getLong(int i) {
    switch (getCassandraType(i)) {
        case INT:
            // 32-bit int widens losslessly to long.
            return currentRow.getInt(i);
        case BIGINT:
        case COUNTER:
            // Both are 64-bit values in the driver.
            return currentRow.getLong(i);
        case TIMESTAMP:
            // Timestamps are surfaced as epoch milliseconds.
            return currentRow.getTimestamp(i).getTime();
        case FLOAT:
            // Reinterpret the float's raw IEEE-754 bits as a long
            // (bit-preserving, NOT a numeric conversion).
            return floatToRawIntBits(currentRow.getFloat(i));
        default:
            throw new IllegalStateException("Cannot retrieve long for " + getCassandraType(i));
    }
}
/**
 * Fetches partition-size statistics for one table from Cassandra's
 * system.size_estimates table.
 *
 * @param keyspaceName keyspace the table lives in
 * @param tableName    table to fetch estimates for
 * @return immutable list of one estimate per token range
 */
@Override
public List<SizeEstimate> getSizeEstimates(String keyspaceName, String tableName) {
    checkSizeEstimatesTableExist();
    // Restrict the system-table scan to the requested keyspace/table pair.
    Statement query = select("range_start", "range_end", "mean_partition_size", "partitions_count")
            .from(SYSTEM, SIZE_ESTIMATES)
            .where(eq("keyspace_name", keyspaceName))
            .and(eq("table_name", tableName));
    ResultSet rows = executeWithSession(session -> session.execute(query));
    ImmutableList.Builder<SizeEstimate> builder = ImmutableList.builder();
    for (Row row : rows.all()) {
        builder.add(new SizeEstimate(
                row.getString("range_start"),
                row.getString("range_end"),
                row.getLong("mean_partition_size"),
                row.getLong("partitions_count")));
    }
    return builder.build();
}
// BIGINT and COUNTER both surface as a Java long in the driver; render decimal text.
case BIGINT:
case COUNTER:
    return Long.toString(row.getLong(i));
case BOOLEAN:
    return Boolean.toString(row.getBool(i));
/**
 * Reads the persisted counter value for a queue/message-type pair.
 *
 * @param queueName queue the counter belongs to
 * @param type      message type discriminator for the counter row
 * @return the stored counter value, or null if no counter row exists
 * @throws QakkaRuntimeException if more than one row matches (data corruption)
 */
private Long retrieveCounterFromStorage( String queueName, DatabaseQueueMessage.Type type ) {

    Statement select = QueryBuilder.select().from( TABLE_MESSAGE_COUNTERS )
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ) )
            .and( QueryBuilder.eq( COLUMN_MESSAGE_TYPE, type.toString()) );

    List<Row> rows = cassandraClient.getQueueMessageSession().execute( select ).all();

    // The (queue, type) pair is expected to be unique; more than one row is corruption.
    if ( rows.size() > 1 ) {
        throw new QakkaRuntimeException( "Multiple rows for counter " + queueName + " type " + type );
    }
    if ( rows.isEmpty() ) {
        return null;
    }
    return rows.get(0).getLong( COLUMN_COUNTER_VALUE );
}
/**
 * Inserts {@code rowsCount} rows with distinct keys and clustering-key values into
 * the given table, then asserts the table's total row count matches.
 *
 * @param session   Cassandra session to execute against
 * @param table     target schema/table
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
public static void insertIntoTableClusteringKeys(CassandraSession session, SchemaTableName table, int rowsCount) {
    // Use a primitive int index: the original boxed Integer forced an autobox on
    // every iteration and needed explicit .toString() calls in concatenations.
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    // Sanity-check that exactly rowsCount rows are present.
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
/**
 * Reads the persisted counter value for a specific queue/shard-type/shard-id triple.
 *
 * @param queueName queue the counter belongs to
 * @param type      shard type discriminator
 * @param shardId   shard the counter is scoped to
 * @return the stored counter value, or null if no counter row exists
 * @throws QakkaRuntimeException if more than one row matches (data corruption)
 */
Long retrieveCounterFromStorage( String queueName, Shard.Type type, long shardId ) {

    Statement select = QueryBuilder.select().from( TABLE_COUNTERS )
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ) )
            .and( QueryBuilder.eq( COLUMN_SHARD_TYPE, type.toString()) )
            .and( QueryBuilder.eq( COLUMN_SHARD_ID, shardId ) );

    List<Row> rows = cassandraClient.getQueueMessageSession().execute( select ).all();

    // The (queue, type, shardId) triple is expected to be unique.
    if ( rows.size() > 1 ) {
        throw new QakkaRuntimeException(
                "Multiple rows for counter " + queueName + " type " + type + " shardId " + shardId );
    }
    if ( rows.isEmpty() ) {
        return null;
    }
    return rows.get(0).getLong( COLUMN_COUNTER_VALUE );
}
/**
 * Inserts nine rows with distinct two-part partition keys and clustering-key values
 * into the given table, then asserts the table contains exactly nine rows.
 *
 * @param session Cassandra session to execute against
 * @param table   target schema/table
 */
public static void insertIntoTableMultiPartitionClusteringKeys(CassandraSession session, SchemaTableName table) {
    // Use a primitive int index: the original boxed Integer forced an autobox on
    // every iteration and needed explicit .toString() calls in concatenations.
    for (int rowNumber = 1; rowNumber < 10; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("partition_one", "partition_one_" + rowNumber)
                .value("partition_two", "partition_two_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    // Sanity-check that all 9 rows landed.
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), 9);
}
/**
 * Inserts {@code rowsCount} rows sharing one partition key, with monotonically
 * increasing clustering columns (an int and a millisecond timestamp), then asserts
 * the table's total row count matches. Suitable for testing range/inequality pushdown.
 *
 * @param session   Cassandra session to execute against
 * @param table     target schema/table
 * @param date      base timestamp; each row is offset by rowNumber * 10 ms
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
public static void insertIntoTableClusteringKeysInequality(CassandraSession session, SchemaTableName table, Date date, int rowsCount) {
    // Use a primitive int index: the original boxed Integer forced an autobox on
    // every iteration for no benefit.
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_1")
                .value("clust_one", "clust_one")
                .value("clust_two", rowNumber)
                .value("clust_three", date.getTime() + rowNumber * 10);
        session.execute(insert);
    }
    // Sanity-check that exactly rowsCount rows are present.
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
// Read column i of the driver row as a 64-bit long.
return row.getLong(i);
@Override public Result<AuditLog> getAuditLogs( UUID messageId ) { Statement query = QueryBuilder.select().all().from(TABLE_AUDIT_LOG) .where( QueryBuilder.eq( COLUMN_MESSAGE_ID, messageId ) ); ResultSet rs = cassandraClient.getApplicationSession().execute( query ); final List<AuditLog> auditLogs = rs.all().stream().map( row -> new AuditLog( AuditLog.Action.valueOf( row.getString( COLUMN_ACTION )), AuditLog.Status.valueOf( row.getString( COLUMN_STATUS )), row.getString( COLUMN_QUEUE_NAME ), row.getString( COLUMN_REGION ), row.getUUID( COLUMN_MESSAGE_ID ), row.getUUID( COLUMN_QUEUE_MESSAGE_ID ), row.getLong( COLUMN_TRANSFER_TIME ) ) ).collect( Collectors.toList() ); return new Result<AuditLog>() { @Override public PagingState getPagingState() { return null; // no paging } @Override public List<AuditLog> getEntities() { return auditLogs; } }; }
/**
 * Inserts {@code rowsCount} rows covering every supported Cassandra column type
 * (uuid, numeric, bytes, timestamp, inet, collections, ...) into the given table,
 * then asserts the table's total row count matches.
 *
 * @param session   Cassandra session to execute against
 * @param table     target schema/table
 * @param date      value used for the typetimestamp column of every row
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
private static void insertTestData(CassandraSession session, SchemaTableName table, Date date, int rowsCount) {
    // Use a primitive int index: the original boxed Integer forced an autobox on
    // every iteration and needed .toString()/.longValue() calls for no benefit.
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key " + rowNumber)
                .value("typeuuid", UUID.fromString(String.format("00000000-0000-0000-0000-%012d", rowNumber)))
                .value("typeinteger", rowNumber)
                .value("typelong", rowNumber + 1000L)
                .value("typebytes", ByteBuffer.wrap(Ints.toByteArray(rowNumber)).asReadOnlyBuffer())
                .value("typetimestamp", date)
                .value("typeansi", "ansi " + rowNumber)
                .value("typeboolean", rowNumber % 2 == 0)
                .value("typedecimal", new BigDecimal(Math.pow(2, rowNumber)))
                .value("typedouble", Math.pow(4, rowNumber))
                .value("typefloat", (float) Math.pow(8, rowNumber))
                .value("typeinet", InetAddresses.forString("127.0.0.1"))
                .value("typevarchar", "varchar " + rowNumber)
                .value("typevarint", BigInteger.TEN.pow(rowNumber))
                .value("typetimeuuid", UUID.fromString(String.format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber)))
                .value("typelist", ImmutableList.of("list-value-1" + rowNumber, "list-value-2" + rowNumber))
                .value("typemap", ImmutableMap.of(rowNumber, rowNumber + 1L, rowNumber + 2, rowNumber + 3L))
                .value("typeset", ImmutableSet.of(false, true));
        session.execute(insert);
    }
    // Sanity-check that exactly rowsCount rows are present.
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).all().get(0).getLong(0), rowsCount);
}
}
/**
 * Loads the queue definition with the given name from the queues table.
 *
 * @param name queue name (primary key of the queues table)
 * @return the queue definition, or null if no such queue exists
 */
@Override
public DatabaseQueue getQueue(String name) {

    logger.trace( "getQueue " + name );

    Statement select = QueryBuilder.select().all().from(TABLE_QUEUES)
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, name));

    Row row = cassandraClient.getApplicationSession().execute(select).one();
    if (row == null) {
        return null;
    }

    // Re-hydrate the queue definition straight from the row's columns.
    return new DatabaseQueue(
            row.getString(COLUMN_QUEUE_NAME),
            row.getString(COLUMN_REGIONS),
            row.getString(COLUMN_DEFAULT_DESTINATIONS),
            row.getLong(COLUMN_DEFAULT_DELAY_MS),
            row.getInt(COLUMN_RETRY_COUNT),
            row.getInt(COLUMN_HANDLING_TIMEOUT_SEC),
            row.getString(COLUMN_DEAD_LETTER_QUEUE));
}
// BIGINT and COUNTER both surface as a Java long; wrap in the engine's NullableValue.
case BIGINT:
case COUNTER:
    return NullableValue.of(nativeType, row.getLong(i));
case BOOLEAN:
    return NullableValue.of(nativeType, row.getBool(i));
// Trailing constructor arguments of a transfer-log entry built from the row,
// then collected into the result list.
// NOTE(review): the constructor call and tlog declaration begin outside this
// fragment — confirm argument order against the TransferLog constructor.
row.getString( COLUMN_DEST_REGION ),
row.getUUID( COLUMN_MESSAGE_ID ),
row.getLong( COLUMN_TRANSFER_TIME ));
transferLogs.add( tlog );
/**
 * Verifies that the container's init script ran: the seeded row must be present
 * in keySpaceTest.catalog_category with the expected id and name.
 *
 * @param cassandraContainer container whose init script is under test
 */
private void testInitScript(CassandraContainer cassandraContainer) {
    ResultSet queryResult = performQuery(cassandraContainer, "SELECT * FROM keySpaceTest.catalog_category");
    assertTrue("Query was not applied", queryResult.wasApplied());

    // The init script should have seeded exactly this row.
    Row seededRow = queryResult.one();
    assertEquals("Inserted row is not in expected state", 1, seededRow.getLong(0));
    assertEquals("Inserted row is not in expected state", "test_category", seededRow.getString(1));
}
/**
 * Reloads the persisted state of the given shard from its type-specific table,
 * matching on queue name, region, active flag and shard id.
 *
 * @param shard shard whose identity (queue, region, type, id) selects the row
 * @return a freshly loaded Shard, or null if no matching active row exists
 */
public Shard loadShard(final Shard shard){

    Statement select = QueryBuilder.select().from(getTableName(shard.getType()))
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, shard.getQueueName()))
            .and(QueryBuilder.eq(COLUMN_REGION, shard.getRegion()))
            .and(QueryBuilder.eq(COLUMN_ACTIVE, 1))
            .and(QueryBuilder.eq(COLUMN_SHARD_ID, shard.getShardId()));

    Row row = cassandraClient.getQueueMessageSession().execute(select).one();
    if (row == null){
        return null;
    }

    // Rebuild the shard from the stored columns; the type comes from the caller's
    // shard since it is encoded in the table name rather than a column.
    return new Shard(
            row.getString(COLUMN_QUEUE_NAME),
            row.getString(COLUMN_REGION),
            shard.getType(),
            row.getLong(COLUMN_SHARD_ID),
            row.getUUID(COLUMN_POINTER));
}