/**
 * Builds a Thrift-backed cluster context from the supplied builder and starts it
 * before handing it to the caller.
 *
 * @param cb pre-configured Astyanax context builder
 * @return the started cluster context
 */
private static AstyanaxContext<Cluster> createCluster(AstyanaxContext.Builder cb) {
    final AstyanaxContext<Cluster> context = cb.buildCluster(ThriftFamilyFactory.getInstance());
    context.start();
    return context;
}
// Tail of an anonymous RowOp handed to doWrite (the enclosing "new RowOp() {" starts
// outside this view): removes the column keyed by the given version from the row.
@Override public void doOp( final ColumnListMutation<UUID> colMutation ) { colMutation.deleteColumn( version ); } } );
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): writes the
// edge column into the application-scoped row, storing the isDeleted flag as the column
// value — presumably a soft-delete marker; confirm against DirectedEdge serializer usage.
@Override void writeEdge( final MutationBatch batch, final MultiTenantColumnFamily<ScopedRowKey<RowKey>, DirectedEdge> columnFamily, final ApplicationScope scope, final RowKey rowKey, final DirectedEdge edge, final Shard shard, final boolean isDeleted ) { batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) ).putColumn( edge, isDeleted ); } }.createBatch( scope, shards, timestamp );
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): hard-deletes
// the edge column from the application-scoped row, using the columnFamily parameter.
@Override void writeEdge( final MutationBatch batch, final MultiTenantColumnFamily<ScopedRowKey<RowKey>, DirectedEdge> columnFamily, final ApplicationScope scope, final RowKey rowKey, final DirectedEdge edge, final Shard shard, final boolean isDeleted ) { batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) ).deleteColumn( edge ); } }.createBatch( scope, shards, timestamp );
/**
 * Do the write on the correct row for the entity id with the operation.
 *
 * @param applicationScope scope whose application id namespaces the row key
 * @param entityId         id of the entity whose row is mutated
 * @param version          version whose time component timestamps the mutation
 * @param op               row operation applied to the prepared row mutation
 * @return the mutation batch, ready for the caller to execute
 */
private MutationBatch doWrite( final ApplicationScope applicationScope, final Id entityId, final UUID version,
                               final RowOp op ) {

    final MutationBatch batch = keyspace.prepareMutationBatch();

    // scope the entity row to the owning application
    final ScopedRowKey<Id> rowKey =
            ScopedRowKey.fromKey( applicationScope.getApplication(), entityId );

    // the version's time component orders this write relative to other versions
    op.doOp( batch.withRow( CF_ENTITY_DATA, rowKey ).setTimestamp( version.timestamp() ) );

    return batch;
}
// Returns the compression options configured on the given column family, failing with a
// permanent error when the keyspace or the column family is undefined.
// NOTE(review): ConnectionException is wrapped as PermanentBackendException here but as
// TemporaryBackendException in getCassandraPartitioner — confirm the asymmetry is intentional.
// The trailing extra brace closes the enclosing class.
@Override public Map<String, String> getCompressionOptions(String cf) throws BackendException { try { Keyspace k = keyspaceContext.getClient(); KeyspaceDefinition kdef = k.describeKeyspace(); if (null == kdef) { throw new PermanentBackendException("Keyspace " + k.getKeyspaceName() + " is undefined"); } ColumnFamilyDefinition cfdef = kdef.getColumnFamily(cf); if (null == cfdef) { throw new PermanentBackendException("Column family " + cf + " is undefined"); } return cfdef.getCompressionOptions(); } catch (ConnectionException e) { throw new PermanentBackendException(e); } } }
@Override public void clearStorage() throws BackendException { try { Cluster cluster = clusterContext.getClient(); Keyspace ks = cluster.getKeyspace(keySpaceName); // Not a big deal if Keyspace doesn't not exist (dropped manually by user or tests). // This is called on per test setup basis to make sure that previous test cleaned // everything up, so first invocation would always fail as Keyspace doesn't yet exist. if (ks == null) return; for (ColumnFamilyDefinition cf : cluster.describeKeyspace(keySpaceName).getColumnFamilyList()) { ks.truncateColumnFamily(new ColumnFamily<Object, Object>(cf.getName(), null, null)); } } catch (ConnectionException e) { throw new PermanentBackendException(e); } }
/**
 * Removes the id-type-to-target edge metadata, writing through both serializations
 * while a migration is in flight so neither copy retains the stale entry.
 *
 * @param scope application scope owning the edge
 * @param edge  edge whose metadata is removed
 * @return a mutation batch for the caller to execute
 */
@Override
public MutationBatch removeIdTypeToTarget( final ApplicationScope scope, final Edge edge ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( !migration.needsMigration() ) {
        // steady state: only the current implementation receives the removal
        return migration.to.removeIdTypeToTarget( scope, edge );
    }

    // mid-migration: apply the removal to both the old and the new implementation
    final MutationBatch combined = keyspace.prepareMutationBatch();
    combined.mergeShallow( migration.from.removeIdTypeToTarget( scope, edge ) );
    combined.mergeShallow( migration.to.removeIdTypeToTarget( scope, edge ) );

    return combined;
}
/**
 * Persists the migration version for the named plugin.
 *
 * @param pluginName plugin whose version is recorded
 * @param version    version number to store
 * @throws DataMigrationException if the write to Cassandra fails
 */
@Override
public void setVersion( final String pluginName, final int version ) {

    // version is stored per-plugin under a statically scoped row key
    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName );

    try {
        // null TTL: the version column never expires
        keyspace.prepareColumnMutation( CF_MIGRATION_INFO, rowKey, COLUMN_VERSION ).putValue( version, null )
                .execute();
    }
    catch ( ConnectionException e ) {
        // was "Unable to save status" — copy-paste from setStatusCode; this method saves the version
        throw new DataMigrationException( "Unable to save version", e );
    }
}
/**
 * Resolves the partitioner in use by the remote cluster and instantiates it locally.
 *
 * @return the cluster's partitioner instance
 * @throws TemporaryBackendException if the cluster cannot be reached (retryable)
 * @throws PermanentBackendException if the reported partitioner class is invalid
 */
@Override
@SuppressWarnings("unchecked")
public IPartitioner getCassandraPartitioner() throws BackendException {
    final Cluster cluster = clusterContext.getClient();
    try {
        return FBUtilities.newPartitioner(cluster.describePartitioner());
    } catch (ConnectionException e) {
        // transient network failure — the caller may retry
        throw new TemporaryBackendException(e);
    } catch (ConfigurationException e) {
        // unknown/invalid partitioner class — retrying will not help
        throw new PermanentBackendException(e);
    }
}
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): writes the
// edge column into the application-scoped row with the isDeleted flag as the column
// value — presumably a soft-delete marker; confirm against DirectedEdge serializer usage.
@Override void writeEdge( final MutationBatch batch, final MultiTenantColumnFamily<ScopedRowKey<RowKey>, DirectedEdge> columnFamily, final ApplicationScope scope, final RowKey rowKey, final DirectedEdge edge, final Shard shard, final boolean isDeleted ) { batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) ).putColumn( edge, isDeleted ); } }.createBatch( scope, shards, timestamp );
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): hard-deletes
// the edge column from the application-scoped row, using the columnFamily parameter.
@Override void writeEdge( final MutationBatch batch, final MultiTenantColumnFamily<ScopedRowKey<RowKey>, DirectedEdge> columnFamily, final ApplicationScope scope, final RowKey rowKey, final DirectedEdge edge, final Shard shard, final boolean isDeleted ) { batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) ).deleteColumn( edge ); } }.createBatch( scope, shards, timestamp );
/**
 * Marks the given entity version, writing through both serialization strategies while
 * a migration is in flight so old and new storage stay in agreement.
 *
 * @param context  application scope owning the entity
 * @param entityId entity to mark
 * @param version  version being marked
 * @return a mutation batch for the caller to execute
 */
@Override
public MutationBatch mark( final ApplicationScope context, final Id entityId, final UUID version ) {

    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( !migration.needsMigration() ) {
        // steady state: only the current strategy receives the mark
        return migration.to.mark( context, entityId, version );
    }

    // mid-migration: mark in both the old and the new strategy
    final MutationBatch combined = keyspace.prepareMutationBatch();
    combined.mergeShallow( migration.from.mark( context, entityId, version ) );
    combined.mergeShallow( migration.to.mark( context, entityId, version ) );

    return combined;
}
/**
 * Persists the migration status code for the named plugin.
 *
 * @param pluginName plugin whose status code is recorded
 * @param status     status code to store
 * @throws DataMigrationException if the write to Cassandra fails
 */
@Override
public void setStatusCode( final String pluginName, final int status ) {

    // status is stored per-plugin under a statically scoped row key
    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName );

    try {
        keyspace.prepareColumnMutation( CF_MIGRATION_INFO, rowKey, COLUMN_STATUS_CODE )
                .putValue( status, null )  // null TTL: the column never expires
                .execute();
    }
    catch ( ConnectionException e ) {
        throw new DataMigrationException( "Unable to save status", e );
    }
}
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): writes the
// edge column into the application-scoped row keyed by RowKeyType, storing the isDeleted
// flag as the column value — presumably a soft-delete marker; confirm with serializer.
@Override void writeEdge( final MutationBatch batch, final MultiTenantColumnFamily<ScopedRowKey<RowKeyType>, DirectedEdge> columnFamily, final ApplicationScope scope, final RowKeyType rowKey, final DirectedEdge edge, final Shard shard, final boolean isDeleted ) { batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) ).putColumn( edge, isDeleted ); } }.createBatch( scope, shards, timestamp );
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): hard-deletes
// the edge column from the application-scoped row.
// FIX: use the columnFamily parameter instead of re-resolving
// columnFamilies.getTargetNodeSourceTypeCfName() — every sibling writeEdge override
// writes to the column family it is handed; ignoring the parameter here would delete
// from the wrong CF if the caller ever supplies a different one. Assumes the caller
// currently passes the same CF — TODO confirm at the createBatch call site.
@Override
void writeEdge( final MutationBatch batch,
                final MultiTenantColumnFamily<ScopedRowKey<RowKeyType>, DirectedEdge> columnFamily,
                final ApplicationScope scope, final RowKeyType rowKey, final DirectedEdge edge,
                final Shard shard, final boolean isDeleted ) {
    batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) )
         .deleteColumn( edge );
} }.createBatch( scope, shards, timestamp );
/**
 * Deletes the given entity version, writing through both serialization strategies while
 * a migration is in flight so neither copy retains the stale data.
 *
 * @param context  application scope owning the entity
 * @param entityId entity whose version is deleted
 * @param version  version to delete
 * @return a mutation batch for the caller to execute
 */
@Override
public MutationBatch delete( final ApplicationScope context, final Id entityId, final UUID version ) {

    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( !migration.needsMigration() ) {
        // steady state: only the current strategy receives the delete
        return migration.to.delete( context, entityId, version );
    }

    // mid-migration: delete from both the old and the new strategy
    final MutationBatch combined = keyspace.prepareMutationBatch();
    combined.mergeShallow( migration.from.delete( context, entityId, version ) );
    combined.mergeShallow( migration.to.delete( context, entityId, version ) );

    return combined;
}
/**
 * Persists the human-readable migration status message for the named plugin.
 *
 * @param pluginName plugin whose status message is recorded
 * @param message    message to store
 * @throws DataMigrationException if the write to Cassandra fails
 */
@Override
public void setStatusMessage( final String pluginName, final String message ) {

    // message is stored per-plugin under a statically scoped row key
    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName );

    try {
        keyspace.prepareColumnMutation( CF_MIGRATION_INFO, rowKey, COL_STATUS_MESSAGE )
                .putValue( message, null )  // null TTL: the column never expires
                .execute();
    }
    catch ( ConnectionException e ) {
        throw new DataMigrationException( "Unable to save status", e );
    }
}
// Anonymous-class tail (enclosing "new ...() {" starts outside this view): writes the
// edge-version column (timestamp) into the application-scoped row, with the isDeleted
// flag stored as the column value.
// FIX: use the columnFamily parameter instead of re-resolving
// columnFamilies.getGraphEdgeVersions() — sibling writeEdge overrides write to the
// column family they are handed; ignoring the parameter risks writing to the wrong CF
// if the caller ever supplies a different one. Assumes the caller currently passes the
// same CF — TODO confirm at the createBatch call site.
@Override
void writeEdge( final MutationBatch batch,
                final MultiTenantColumnFamily<ScopedRowKey<EdgeRowKey>, Long> columnFamily,
                final ApplicationScope scope, final EdgeRowKey rowKey, final Long column,
                final Shard shard, final boolean isDeleted ) {
    batch.withRow( columnFamily, ScopedRowKey.fromKey( scope.getApplication(), rowKey ) )
         .putColumn( column, isDeleted );
} }.createBatch( scope, shards, timestamp );