/**
 * Applies the given mutations through StorageProxy at the requested consistency level.
 * Uses an atomic batch when {@code atomicBatch} is set, a plain mutate otherwise.
 *
 * Concurrency/budget handling: schedule(DatabaseDescriptor.getRpcTimeout()) acquires a
 * slot bounded by the RPC timeout, and release() in the inner finally always returns it,
 * even when the mutate throws. Failure mapping: RequestExecutionException from the mutate
 * and TimeoutException from schedule(...) are both surfaced to callers as
 * TemporaryBackendException (i.e. retryable); anything else propagates unchanged.
 */
private void mutate(List<org.apache.cassandra.db.Mutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException { try { schedule(DatabaseDescriptor.getRpcTimeout()); try { if (atomicBatch) { StorageProxy.mutateAtomically(cmds, clvl); } else { StorageProxy.mutate(cmds, clvl); } } catch (RequestExecutionException e) { throw new TemporaryBackendException(e); } finally { release(); } } catch (TimeoutException ex) { log.debug("Cassandra TimeoutException", ex); throw new TemporaryBackendException(ex); } }
// NOTE(review): fragment — issues a single-command read via StorageProxy at consistency
// level cl and returns the rows. The enclosing method signature and any surrounding
// try/catch are not visible in this excerpt; confirm error handling against the full method.
rows = StorageProxy.read(Arrays.asList(rc), cl); return rows;
// NOTE(review): fragment — the consistency level for this range slice is hard-coded to
// QUORUM rather than taken from the caller; verify that is intentional. Also,
// catch (Exception e) -> PermanentBackendException wraps *every* failure as permanent,
// which misclassifies transient conditions (timeouts, unavailable replicas) that other
// call sites in this codebase treat as TemporaryBackendException — confirm against the
// full method before changing.
rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM); } catch (Exception e) { throw new PermanentBackendException(e);
// NOTE(review): fragment appears garbled in this excerpt — the try-with-resources around
// readOne(...) has no visible braces/body, and the statements after "return null;"
// (recordCasContention, latency metrics) would be unreachable as written; in the full
// cas() method those presumably live in a finally block. Flow as far as visible:
// pick paxos participants, run prepare/repair to obtain a ballot (accumulating
// contentions), read the current row, then propose and, on success, commit.
// Verify against the complete method before editing.
Pair<List<InetAddress>, Integer> p = getPaxosParticipants(metadata, key, consistencyForPaxos); List<InetAddress> liveEndpoints = p.left; int requiredParticipants = p.right; final Pair<UUID, Integer> pair = beginAndRepairPaxos(queryStartNanoTime, key, metadata, liveEndpoints, requiredParticipants, consistencyForPaxos, consistencyForCommit, true, state); final UUID ballot = pair.left; contentions += pair.right; try (RowIterator rowIter = readOne(readCommand, readConsistency, queryStartNanoTime)) if (proposePaxos(proposal, liveEndpoints, requiredParticipants, true, consistencyForPaxos, queryStartNanoTime)) commitPaxos(proposal, consistencyForCommit, true, queryStartNanoTime); Tracing.trace("CAS successful"); return null; recordCasContention(contentions); final long latency = System.nanoTime() - startTimeForMetrics; casWriteMetrics.addNano(latency);
// NOTE(review): fragment garbled — the wrapBatchResponseHandler(...) call is missing its
// trailing arguments and closing parenthesis in this excerpt. Visible flow: collect
// batchlog endpoints for the batch consistency level, mint a batch UUID, register a
// cleanup callback that removes the batch from the batchlog once enough mutations have
// been acknowledged, then synchronously write the batch to the batchlog and the batched
// mutations on the MUTATION stage. Recover the full call from the original file.
final BatchlogEndpoints batchlogEndpoints = getBatchlogEndpoints(localDataCenter, batchConsistencyLevel); final UUID batchUUID = UUIDGen.getTimeUUID(); BatchlogResponseHandler.BatchlogCleanup cleanup = new BatchlogResponseHandler.BatchlogCleanup(mutations.size(), () -> asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID, queryStartNanoTime)); WriteResponseHandlerWrapper wrapper = wrapBatchResponseHandler(mutation, consistency_level, batchConsistencyLevel, syncWriteToBatchlog(mutations, batchlogEndpoints, batchUUID, queryStartNanoTime); syncWriteBatchedMutations(wrappers, localDataCenter, Stage.MUTATION);
// NOTE(review): fragment (older, pre-queryStartNanoTime CAS path) — participant/ballot
// acquisition followed by a read at LOCAL_QUORUM/QUORUM depending on the serial
// consistency, then propose + commit. The `ballot` and `rows` locals are not used in the
// visible portion — presumably consumed by the (not shown) casRequest condition check
// between the read and the propose; confirm against the full method. The brace-less
// if-body formatting makes the commit/trace/return association easy to misread —
// as written only commitPaxos(...) is conditional on proposePaxos(...).
Pair<List<InetAddress>, Integer> p = getPaxosParticipants(keyspaceName, key, consistencyForPaxos); List<InetAddress> liveEndpoints = p.left; int requiredParticipants = p.right; final Pair<UUID, Integer> pair = beginAndRepairPaxos(start, key, metadata, liveEndpoints, requiredParticipants, consistencyForPaxos, consistencyForCommit, true, state); final UUID ballot = pair.left; contentions += pair.right; long timestamp = System.currentTimeMillis(); ReadCommand readCommand = ReadCommand.create(keyspaceName, key, cfName, timestamp, request.readFilter()); List<Row> rows = read(Arrays.asList(readCommand), consistencyForPaxos == ConsistencyLevel.LOCAL_SERIAL ? ConsistencyLevel.LOCAL_QUORUM : ConsistencyLevel.QUORUM); if (proposePaxos(proposal, liveEndpoints, requiredParticipants, true, consistencyForPaxos)) commitPaxos(proposal, consistencyForCommit, true); Tracing.trace("CAS successful"); return null;
// NOTE(review): fragment — SERIAL/LOCAL_SERIAL read path: runs a paxos prepare/repair
// round (commit=false) to ensure any in-progress paxos writes are finished before
// reading, records contention in casReadMetrics when it occurred, then performs the
// actual read at the commit/fetch consistency. The `ballot` component of `pair` is
// unused in the visible portion; confirm against the full method.
Pair<List<InetAddress>, Integer> p = getPaxosParticipants(metadata, key, consistencyLevel); List<InetAddress> liveEndpoints = p.left; int requiredParticipants = p.right; final Pair<UUID, Integer> pair = beginAndRepairPaxos(start, key, metadata, liveEndpoints, requiredParticipants, consistencyLevel, consistencyForCommitOrFetch, false, state); if (pair.right > 0) casReadMetrics.contention.update(pair.right); result = fetchRows(group.commands, consistencyForCommitOrFetch, queryStartNanoTime);
// NOTE(review): fragment (legacy batchlog path, no queryStartNanoTime) — wraps each
// mutation's response handler with WriteType.BATCH, writes the batch to the batchlog,
// synchronously applies the batched mutations, then asynchronously removes the batchlog
// entry. The `wrapper` local is assigned but not added to `wrappers` in the visible
// portion — presumably the wrappers.add(...) happens in the (not shown) per-mutation
// loop; confirm against the full method.
WriteResponseHandlerWrapper wrapper = wrapResponseHandler(mutation, consistency_level, WriteType.BATCH); Collection<InetAddress> batchlogEndpoints = getBatchlogEndpoints(localDataCenter, consistency_level); UUID batchUUID = UUIDGen.getTimeUUID(); syncWriteToBatchlog(mutations, batchlogEndpoints, batchUUID); syncWriteBatchedMutations(wrappers, localDataCenter); asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID);
/**
 * Best-effort insert: applies the given mutations via StorageProxy, retrying on
 * transient failures (UnavailableException, TimeoutException) up to {@code retryAttempts}
 * times with a fixed sleep of {@code retryAttemptSleep} ms between attempts.
 *
 * @param cl        consistency level for the write
 * @param mutations mutations to apply
 * @throws RuntimeException when every attempt fails; carries the last transient
 *                          failure as its cause when one was observed
 */
public static void robustInsert(ConsistencyLevel cl, RowMutation... mutations)
{
    Exception lastFailure = null;
    int attempts = 0;
    while (attempts++ < retryAttempts)
    {
        try
        {
            StorageProxy.mutate(Arrays.asList(mutations), cl);
            return;
        }
        catch (UnavailableException e)
        {
            // Transient: not enough live replicas right now — retry after a pause.
            lastFailure = e;
        }
        catch (TimeoutException e)
        {
            // Transient: replicas did not respond in time — retry after a pause.
            lastFailure = e;
        }
        try
        {
            Thread.sleep(retryAttemptSleep);
        }
        catch (InterruptedException e)
        {
            // Restore the interrupt flag for callers instead of swallowing it.
            Thread.currentThread().interrupt();
        }
    }
    // Bug fix: the original message hard-coded "10 attempts" regardless of the
    // configured retryAttempts, and dropped the underlying cause.
    throw new RuntimeException("insert failed after " + retryAttempts + " attempts", lastFailure);
}
/**
 * Executes the given pageable command (or none at all) and converts the resulting rows
 * into a client-facing ResultMessage.Rows.
 *
 * A null command short-circuits to an empty row list; read commands go through
 * StorageProxy.read, range queries through StorageProxy.getRangeSlice.
 */
private ResultMessage.Rows execute(Pageable command, QueryOptions options, int limit, long now, QueryState state)
throws RequestValidationException, RequestExecutionException
{
    List<Row> rows;
    if (command == null)
    {
        // Nothing to fetch — return an empty result set.
        rows = Collections.<Row>emptyList();
    }
    else if (command instanceof Pageable.ReadCommands)
    {
        rows = StorageProxy.read(((Pageable.ReadCommands)command).commands, options.getConsistency(), state.getClientState());
    }
    else
    {
        rows = StorageProxy.getRangeSlice((RangeSliceCommand)command, options.getConsistency());
    }
    return processResults(rows, options, limit, now);
}
/**
 * Produces the next token range to query, paired with its live replica endpoints and
 * the subset of those endpoints the consistency level selects for the query.
 * Returns endOfData() when all ranges have been consumed.
 *
 * NOTE(review): endpoints are looked up from the range's right bound
 * (range.right) — presumably because replicas for a range are determined by its
 * end token; confirm against StorageProxy.getLiveSortedEndpoints.
 */
protected RangeForQuery computeNext() { if (!ranges.hasNext()) { return endOfData(); } AbstractBounds<PartitionPosition> range = ranges.next(); List<InetAddress> liveEndpoints = StorageProxy.getLiveSortedEndpoints(keyspace, range.right); return new RangeForQuery(range, liveEndpoints, consistency.filterForQuery(keyspace, liveEndpoints)); } }
// NOTE(review): fragment garbled — the wrapViewBatchResponseHandler(...) call is missing
// its trailing arguments and closing parenthesis in this excerpt. Visible flow: register
// a batchlog cleanup callback keyed by mutation count, wrap each view mutation's handler,
// then asynchronously apply the batched mutations on the VIEW_MUTATION stage. Recover
// the full call from the original file before editing.
BatchlogResponseHandler.BatchlogCleanup cleanup = new BatchlogResponseHandler.BatchlogCleanup(mutations.size(), () -> asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID)); wrappers.add(wrapViewBatchResponseHandler(mutation, consistencyLevel, consistencyLevel, asyncWriteBatchedMutations(wrappers, localDataCenter, Stage.VIEW_MUTATION);
// NOTE(review): fragment — `endpoint` is computed via findSuitableEndpoint but never
// used in the visible code: applyCounterMutationOnCoordinator takes only the mutation,
// data center, and start time. Either the endpoint is consumed by code not shown here
// (e.g. a local-vs-remote dispatch between these two lines) or this is dead work —
// confirm against the full method.
InetAddress endpoint = findSuitableEndpoint(cm.getKeyspaceName(), cm.key(), localDataCenter, cm.consistency()); return applyCounterMutationOnCoordinator(cm, localDataCenter, queryStartNanoTime);
// NOTE(review): fragment heavily garbled — interleaved pieces of range setup
// (unwrap vs. getRestrictedRanges), per-range result-row estimation, and the
// adjacent-range endpoint-merging loop (intersection of live endpoints to decide
// whether two ranges can be queried together). Statement order as shown cannot be
// the original; do not edit without recovering the complete method.
ranges = command.keyRange.unwrap(); else ranges = getRestrictedRanges(command.keyRange); float resultRowsPerRange = estimateResultRowsPerRange(command, keyspace); : nextRange; List<InetAddress> liveEndpoints = nextEndpoints == null ? getLiveSortedEndpoints(keyspace, range.right) : nextEndpoints; List<InetAddress> filteredEndpoints = nextFilteredEndpoints == null nextEndpoints = getLiveSortedEndpoints(keyspace, nextRange.right); nextFilteredEndpoints = consistency_level.filterForQuery(keyspace, nextEndpoints); break; List<InetAddress> merged = intersection(liveEndpoints, nextEndpoints);
/**
 * Splits the given query range on ring/token boundaries.
 *
 * Thin delegate: all the work is done by StorageProxy.getRestrictedRanges.
 *
 * @param queryRange the range requested by the query
 * @return the ordered sub-ranges covering {@code queryRange}
 */
public static <T extends RingPosition<T>> List<AbstractBounds<T>> getRestrictedRanges(final AbstractBounds<T> queryRange)
{
    final List<AbstractBounds<T>> restricted = StorageProxy.getRestrictedRanges(queryRange);
    return restricted;
}
public void doVerb(final MessageIn<CounterMutation> message, final int id) { long queryStartNanoTime = System.nanoTime(); final CounterMutation cm = message.payload; logger.trace("Applying forwarded {}", cm); String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(FBUtilities.getBroadcastAddress()); // We should not wait for the result of the write in this thread, // otherwise we could have a distributed deadlock between replicas // running this VerbHandler (see #4578). // Instead, we use a callback to send the response. Note that the callback // will not be called if the request timeout, but this is ok // because the coordinator of the counter mutation will timeout on // it's own in that case. StorageProxy.applyCounterMutationOnLeader(cm, localDataCenter, new Runnable() { public void run() { MessagingService.instance().sendReply(WriteResponse.createMessage(), id, message.from); } }, queryStartNanoTime); } }
/**
 * Removes a committed batch from the batchlog on both endpoint generations:
 * current-version endpoints go through the modern overload (which, as visible here,
 * does not take the start time), while legacy endpoints are handled by
 * LegacyBatchlogMigrator with {@code queryStartNanoTime} passed through.
 * Either set may be empty, in which case that path is skipped.
 */
private static void asyncRemoveFromBatchlog(BatchlogEndpoints endpoints, UUID uuid, long queryStartNanoTime) { if (!endpoints.current.isEmpty()) asyncRemoveFromBatchlog(endpoints.current, uuid); if (!endpoints.legacy.isEmpty()) LegacyBatchlogMigrator.asyncRemoveFromBatchlog(endpoints.legacy, uuid, queryStartNanoTime); }
// NOTE(review): fragment appears garbled (and duplicates an earlier excerpt) — the
// try-with-resources around readOne(...) has no visible braces/body, and the statements
// after "return null;" (recordCasContention, latency metrics) would be unreachable as
// written; in the full cas() method those presumably live in a finally block. Verify
// against the complete method before editing.
Pair<List<InetAddress>, Integer> p = getPaxosParticipants(metadata, key, consistencyForPaxos); List<InetAddress> liveEndpoints = p.left; int requiredParticipants = p.right; final Pair<UUID, Integer> pair = beginAndRepairPaxos(queryStartNanoTime, key, metadata, liveEndpoints, requiredParticipants, consistencyForPaxos, consistencyForCommit, true, state); final UUID ballot = pair.left; contentions += pair.right; try (RowIterator rowIter = readOne(readCommand, readConsistency, queryStartNanoTime)) if (proposePaxos(proposal, liveEndpoints, requiredParticipants, true, consistencyForPaxos, queryStartNanoTime)) commitPaxos(proposal, consistencyForCommit, true, queryStartNanoTime); Tracing.trace("CAS successful"); return null; recordCasContention(contentions); final long latency = System.nanoTime() - startTimeForMetrics; casWriteMetrics.addNano(latency);
// NOTE(review): fragment garbled (duplicates an earlier excerpt) — the
// wrapBatchResponseHandler(...) call is missing its trailing arguments and closing
// parenthesis. Visible flow: collect batchlog endpoints, mint a batch UUID, register a
// batchlog-cleanup callback, then synchronously write the batch to the batchlog and the
// batched mutations on the MUTATION stage. Recover the full call from the original file.
final BatchlogEndpoints batchlogEndpoints = getBatchlogEndpoints(localDataCenter, batchConsistencyLevel); final UUID batchUUID = UUIDGen.getTimeUUID(); BatchlogResponseHandler.BatchlogCleanup cleanup = new BatchlogResponseHandler.BatchlogCleanup(mutations.size(), () -> asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID, queryStartNanoTime)); WriteResponseHandlerWrapper wrapper = wrapBatchResponseHandler(mutation, consistency_level, batchConsistencyLevel, syncWriteToBatchlog(mutations, batchlogEndpoints, batchUUID, queryStartNanoTime); syncWriteBatchedMutations(wrappers, localDataCenter, Stage.MUTATION);
// NOTE(review): fragment (duplicates an earlier excerpt) — SERIAL/LOCAL_SERIAL read
// path: runs a paxos prepare/repair round (commit=false) so in-progress paxos writes
// finish before reading, records contention in casReadMetrics when observed, then
// performs the actual read at the commit/fetch consistency. The ballot component of
// `pair` is unused in the visible portion; confirm against the full method.
Pair<List<InetAddress>, Integer> p = getPaxosParticipants(metadata, key, consistencyLevel); List<InetAddress> liveEndpoints = p.left; int requiredParticipants = p.right; final Pair<UUID, Integer> pair = beginAndRepairPaxos(start, key, metadata, liveEndpoints, requiredParticipants, consistencyLevel, consistencyForCommitOrFetch, false, state); if (pair.right > 0) casReadMetrics.contention.update(pair.right); result = fetchRows(group.commands, consistencyForCommitOrFetch, queryStartNanoTime);