/**
 * {@inheritDoc}
 *
 * Folds the statements produced by every registered mapper into a single
 * batch of the configured type, returned as a one-element statement list.
 */
@Override
public List<Statement> map(Map<String, Object> conf, Session session, ITuple tuple) {
    final BatchStatement combined = new BatchStatement(this.type);
    for (CQLStatementTupleMapper mapper : mappers) {
        combined.addAll(mapper.map(conf, session, tuple));
    }
    // Callers receive the aggregated batch as a single Statement.
    return Arrays.asList((Statement) combined);
}
}
/**
 * Wraps the given statements in a single batch of the configured type and executes it.
 *
 * @param statements the individual statements to run as one batch
 */
protected void executeBatch(Statement... statements) {
    // Fix: the first debug message was emitted unconditionally while the second was
    // guarded — guard both so no debug work (incl. Arrays.toString) happens when off.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Execute cassandra list of statements");
        LOG.debug("Execute cassandra statements {} ", Arrays.toString(statements));
    }
    BatchStatement batchStatement = new BatchStatement(getBatchType());
    for (Statement statement : statements) {
        batchStatement.add(statement);
    }
    executeBatch(batchStatement);
}
/**
 * Collects the statements of every pair into one UNLOGGED batch while
 * remembering the originating tuples, so both can be emitted together.
 */
@Override
public PairBatchStatementTuples apply(List<PairStatementTuple> pairs) {
    final List<Tuple> tupleInputs = new LinkedList<>();
    final BatchStatement unloggedBatch = new BatchStatement(BatchStatement.Type.UNLOGGED);
    for (PairStatementTuple pair : pairs) {
        unloggedBatch.add(pair.getStatement());
        tupleInputs.add(pair.getTuple());
    }
    return new PairBatchStatementTuples(tupleInputs, unloggedBatch);
}
});
/**
 * Maps each tuple to its CQL statements and executes them — batched when a
 * batching type is configured, otherwise one by one.
 *
 * @param tuples    the Trident tuples to persist
 * @param collector collector used to report errors back to Trident
 * @throws FailedException when any write fails, so Trident replays the batch
 */
public void updateState(List<TridentTuple> tuples, final TridentCollector collector) {
    List<Statement> statements = new ArrayList<>();
    for (TridentTuple tuple : tuples) {
        statements.addAll(options.cqlStatementTupleMapper.map(conf, session, tuple));
    }
    try {
        if (options.batchingType != null) {
            BatchStatement batchStatement = new BatchStatement(options.batchingType);
            batchStatement.addAll(statements);
            session.execute(batchStatement);
        } else {
            for (Statement statement : statements) {
                session.execute(statement);
            }
        }
    } catch (Exception e) {
        // Fix: attach the throwable to the warning — previously only the bare
        // message was logged, losing the failure cause from this log line.
        LOG.warn("Batch write operation is failed.", e);
        collector.reportError(e);
        throw new FailedException(e);
    }
}
/**
 * Routes the bound statement to a per-host UNLOGGED batch chosen by the load
 * balancing policy; statements with no candidate host fall back to the shared
 * data-point batch.
 *
 * @param boundStatement the statement to enqueue
 */
private void addBoundStatement(BoundStatement boundStatement) {
    Iterator<Host> hosts = m_loadBalancingPolicy.newQueryPlan(m_clusterConnection.getKeyspace(), boundStatement);
    if (hosts.hasNext()) {
        Host hostKey = hosts.next();
        // computeIfAbsent replaces the manual get / null-check / put dance.
        BatchStatement batchStatement = m_batchMap.computeIfAbsent(
                hostKey, host -> new BatchStatement(BatchStatement.Type.UNLOGGED));
        batchStatement.add(boundStatement);
    } else {
        // No host produced by the query plan — queue on the fallback batch.
        dataPointBatch.add(boundStatement);
    }
}
private void confirmUniqueFields( MvccEntity mvccEntity, UUID version, ApplicationScope scope, MutationBatch logMutation) { final Entity entity = mvccEntity.getEntity().get(); // re-write the unique values but this time with no TTL final BatchStatement uniqueBatch = new BatchStatement(); for ( Field field : EntityUtils.getUniqueFields(mvccEntity.getEntity().get()) ) { UniqueValue written = new UniqueValueImpl( field, entity.getId(), version); uniqueBatch.add(uniqueValueStrat.writeCQL(scope, written, -1 )); logger.debug("Finalizing {} unique value {}", field.getName(), field.getValue().toString()); } try { logMutation.execute(); session.execute(uniqueBatch); } catch ( ConnectionException e ) { logger.error( "Failed to execute write asynchronously ", e ); throw new WriteCommitException( mvccEntity, scope, "Failed to execute write asynchronously ", e ); } }
/**
 * Deletes all DEFAULT and INFLIGHT shard rows for the queue in the given
 * region, as a single batch.
 *
 * @param queueName queue whose shards are removed
 * @param region    region the shards belong to
 */
@Override
public void deleteAllShards(String queueName, String region) {
    BatchStatement batch = new BatchStatement();
    Shard.Type[] shardTypes = new Shard.Type[]{Shard.Type.DEFAULT, Shard.Type.INFLIGHT};
    for (Shard.Type shardType : shardTypes) {
        Statement delete = QueryBuilder.delete().from(getTableName(shardType))
                .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, queueName))
                .and(QueryBuilder.eq(COLUMN_REGION, region));
        // Fix: log the per-shard delete statement — the old code logged
        // batch.toString(), which was empty/incomplete at this point.
        logger.trace("Removing shards for queue {} region {} shardType {} query {}",
                queueName, region, shardType, delete.toString());
        batch.add(delete);
    }
    cassandraClient.getQueueMessageSession().execute(batch);
}
// NOTE(review): fragment — starts a batch and iterates tasks, serializing each
// to JSON; the lambda body continues beyond this chunk, so it is left as-is.
BatchStatement batchStatement = new BatchStatement();
tasks.forEach(task -> {
    String taskPayload = toJson(task);
/**
 * Deletes all given token rows plus the principal's token index row, in one batch.
 *
 * @param tokenUUIDs         tokens to delete; must not be null
 * @param principalKeyBuffer serialized principal key whose token index row is removed; must not be null
 */
@Override
public void deleteTokens(final List<UUID> tokenUUIDs, final ByteBuffer principalKeyBuffer) {
    Preconditions.checkNotNull(tokenUUIDs, "token UUID list is required");
    // Fix: this precondition previously re-checked tokenUUIDs, leaving
    // principalKeyBuffer unvalidated (copy-paste bug).
    Preconditions.checkNotNull(principalKeyBuffer, "principalKeyBuffer is required");
    logger.trace("deleteTokens, token UUIDs: {}", tokenUUIDs);
    final BatchStatement batchStatement = new BatchStatement();
    tokenUUIDs.forEach(tokenUUID -> batchStatement.add(
        QueryBuilder.delete()
            .from(TOKENS_TABLE)
            .where(QueryBuilder.eq("key",
                DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED)))));
    batchStatement.add(
        QueryBuilder.delete()
            .from(PRINCIPAL_TOKENS_TABLE)
            .where(QueryBuilder.eq("key", principalKeyBuffer)));
    session.execute(batchStatement);
}
public void call( final Throwable t ) { if ( t instanceof CollectionRuntimeException ) { CollectionRuntimeException cre = ( CollectionRuntimeException ) t; final MvccEntity mvccEntity = cre.getEntity(); final ApplicationScope scope = cre.getApplicationScope(); // one batch to handle rollback MutationBatch rollbackMb = null; final BatchStatement uniqueDeleteBatch = new BatchStatement(); final Optional<Entity> entity = mvccEntity.getEntity(); if ( entity.isPresent() ) { for ( final Field field : entity.get().getFields() ) { // if it's unique, add its deletion to the rollback batch if ( field.isUnique() ) { UniqueValue toDelete = new UniqueValueImpl( field, entity.get().getId(), mvccEntity.getVersion() ); uniqueDeleteBatch.add(uniqueValueStrat.deleteCQL(scope, toDelete )); } } // execute the batch statements for deleting unique field entries session.execute(uniqueDeleteBatch); logEntryStrat.delete( scope, entity.get().getId(), mvccEntity.getVersion() ); } } }
/**
 * Builds the delete for a unique value. While a migration is in flight the
 * value is deleted from both the old and the new strategy; otherwise only
 * the target strategy is used.
 */
@Override
public BatchStatement deleteCQL(final ApplicationScope applicationScope, final UniqueValue uniqueValue) {
    final MigrationRelationship<UniqueValueSerializationStrategy> migration = getMigrationRelationShip();
    // No migration in progress: delegate straight to the target strategy.
    if (!migration.needsMigration()) {
        return migration.to.deleteCQL(applicationScope, uniqueValue);
    }
    // Mid-migration: delete from both serializations in a single batch.
    final BatchStatement dualDelete = new BatchStatement();
    dualDelete.add(migration.from.deleteCQL(applicationScope, uniqueValue));
    dualDelete.add(migration.to.deleteCQL(applicationScope, uniqueValue));
    return dualDelete;
}
// NOTE(review): fragment — begins a batch and iterates shard types, opening a
// ShardIterator per type; the loop body continues beyond this chunk.
BatchStatement deleteAllBatch = new BatchStatement();
for ( Shard.Type shardType : shardTypes ) {
    ShardIterator defaultShardIterator = new ShardIterator( cassandraClient,
// NOTE(review): fragment — creates a batch and a TTL option for subsequent
// statements; the surrounding method is not visible in this chunk.
final BatchStatement batchStatement = new BatchStatement();
final Using usingTTL = QueryBuilder.ttl(ttl);
private boolean removeTask(Task task) { // TODO: calculate shard number based on seq and maxTasksPerShard try { // get total tasks for this workflow WorkflowMetadata workflowMetadata = getWorkflowMetadata(task.getWorkflowInstanceId()); int totalTasks = workflowMetadata.getTotalTasks(); // remove from task_lookup table removeTaskLookup(task); recordCassandraDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType()); // delete task from workflows table and decrement total tasks by 1 BatchStatement batchStatement = new BatchStatement(); batchStatement.add(deleteTaskStatement.bind(UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID, task.getTaskId())); batchStatement.add(updateTotalTasksStatement.bind(totalTasks - 1, UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID)); ResultSet resultSet = session.execute(batchStatement); return resultSet.wasApplied(); } catch (Exception e) { Monitors.error(CLASS_NAME, "removeTask"); String errorMsg = String.format("Failed to remove task: %s", task.getTaskId()); LOGGER.error(errorMsg, e); throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, errorMsg); } }
/**
 * Revokes a single token: deletes its row and, when the owning principal is
 * known, also unlinks it from the principal's token index — all in one batch.
 */
@Override
public void revokeToken(final UUID tokenUUID, final ByteBuffer principalKeyBuffer) {
    Preconditions.checkNotNull(tokenUUID, "token UUID is required");
    logger.trace("revokeToken, token UUID: {}", tokenUUID);
    // Serialize the token id once; both deletes key on the same bytes.
    final ByteBuffer serializedToken =
        DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED);
    final BatchStatement revocationBatch = new BatchStatement();
    // Always remove the token row itself.
    revocationBatch.add(
        QueryBuilder.delete()
            .from(TOKENS_TABLE)
            .where(QueryBuilder.eq("key", serializedToken)));
    // Principal supplied — also remove the principal-to-token link.
    if (principalKeyBuffer != null) {
        revocationBatch.add(
            QueryBuilder.delete()
                .from(PRINCIPAL_TOKENS_TABLE)
                .where(QueryBuilder.eq("key", principalKeyBuffer))
                .and(QueryBuilder.eq("column1", serializedToken)));
    }
    session.execute(revocationBatch);
}
/**
 * Stores a UUID value under the given key in the map scope, writing both the
 * entry row and the key-listing row in a single batch.
 */
@Override
public void putUuid( final MapScope scope, final String key, final UUID putUuid ) {
    Preconditions.checkNotNull( scope, "mapscope is required" );
    Preconditions.checkNotNull( key, "key is required" );
    Preconditions.checkNotNull( putUuid, "value is required" );
    final BatchStatement writes = new BatchStatement();
    // Entry row: the actual key -> UUID mapping.
    writes.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
        .value("key", getMapEntryPartitionKey(scope, key))
        .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.uuid().serialize(putUuid, ProtocolVersion.NEWEST_SUPPORTED)));
    // Key-listing row: registers the key in the current bucket so it can be enumerated.
    final int currentBucket = BUCKET_LOCATOR.getCurrentBucket( scope.getName() );
    writes.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
        .value("key", getMapKeyPartitionKey(scope, currentBucket))
        .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.serializeValue(null, ProtocolVersion.NEWEST_SUPPORTED)));
    session.execute(writes);
}
// NOTE(review): fragment — pairs a previously built insert and delete into one
// batch; the statements and their execution are outside this chunk.
BatchStatement batchStatement = new BatchStatement();
batchStatement.add( insert );
batchStatement.add( delete );
// NOTE(review): fragment — pairs a previously built write and delete into one
// batch; the statements and their execution are outside this chunk.
BatchStatement batchStatement = new BatchStatement();
batchStatement.add( write );
batchStatement.add( delete );
// NOTE(review): fragment — argument validation followed by batch creation; the
// enclosing method body continues beyond this chunk.
Preconditions.checkNotNull( value, "value is required" );
final BatchStatement batchStatement = new BatchStatement();
// NOTE(review): fragment — creates a new batch statement; how it is populated
// and executed is not visible in this chunk.
final BatchStatement batch = new BatchStatement();