/**
 * Inserts the given mutations, retrying on transient cluster failures
 * ({@link UnavailableException}, {@link TimeoutException}) up to
 * {@code retryAttempts} times, sleeping {@code retryAttemptSleep} ms between tries.
 *
 * @param cl        consistency level to write at
 * @param mutations row mutations to apply as one call
 * @throws RuntimeException if every attempt failed
 */
public static void robustInsert(ConsistencyLevel cl, RowMutation... mutations) {
    int attempts = 0;
    while (attempts++ < retryAttempts) {
        try {
            StorageProxy.mutate(Arrays.asList(mutations), cl);
            return;
        } catch (UnavailableException e) {
            // transient: not enough live replicas — fall through and retry
        } catch (TimeoutException e) {
            // transient: replicas did not respond in time — fall through and retry
        }
        try {
            Thread.sleep(retryAttemptSleep);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of silently swallowing it,
            // so callers can still observe that the thread was interrupted.
            Thread.currentThread().interrupt();
        }
    }
    // Fix: report the configured attempt count rather than a hard-coded "10",
    // which goes stale if retryAttempts is changed.
    throw new RuntimeException("insert failed after " + retryAttempts + " attempts");
}
/**
 * Applies the given Cassandra mutations at the requested consistency level,
 * using an atomic batch when {@code atomicBatch} is set.
 *
 * Flow: a scheduling slot is acquired (bounded by the RPC timeout) before the
 * write and released afterwards; any execution or timeout failure is wrapped
 * as a retriable {@link TemporaryBackendException}.
 *
 * @param cmds mutations to apply
 * @param clvl consistency level for the write
 * @throws BackendException if the write fails or scheduling times out
 */
private void mutate(List<org.apache.cassandra.db.Mutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException {
    try {
        // May throw TimeoutException — handled by the outer catch below.
        schedule(DatabaseDescriptor.getRpcTimeout());
        try {
            if (atomicBatch) {
                StorageProxy.mutateAtomically(cmds, clvl);
            } else {
                StorageProxy.mutate(cmds, clvl);
            }
        } catch (RequestExecutionException e) {
            // Cluster-side failure (unavailable/timeout/overload): retriable.
            throw new TemporaryBackendException(e);
        } finally {
            // release() is only reached after schedule() succeeded, keeping
            // acquire/release balanced.
            release();
        }
    } catch (TimeoutException ex) {
        log.debug("Cassandra TimeoutException", ex);
        throw new TemporaryBackendException(ex);
    }
}
/**
 * Runs any configured triggers over {@code mutations} and then applies the
 * resulting writes, atomically when required.
 *
 * If triggers produced additional mutations, the combined set is always
 * written atomically; otherwise the caller's {@code mutateAtomically} flag
 * decides the write path.
 */
@SuppressWarnings("unchecked")
public static void mutateWithTriggers(Collection<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, boolean mutateAtomically) throws WriteTimeoutException, UnavailableException, OverloadedException, InvalidRequestException {
    Collection<Mutation> augmented = TriggerExecutor.instance.execute(mutations);

    // Trigger-augmented batches must be applied together, hence atomically.
    if (augmented != null) {
        mutateAtomically(augmented, consistencyLevel);
        return;
    }
    if (mutateAtomically) {
        mutateAtomically((Collection<Mutation>) mutations, consistencyLevel);
        return;
    }
    mutate(mutations, consistencyLevel);
}
/**
 * Best-effort write of a single trace mutation at {@code ConsistencyLevel.ANY}.
 * Tracing must never fail the traced operation, so overload errors are logged
 * and dropped instead of propagated.
 *
 * @param mutation the trace event mutation to save
 */
static void mutateWithCatch(Mutation mutation) {
    try {
        StorageProxy.mutate(Collections.singletonList(mutation), ConsistencyLevel.ANY, System.nanoTime());
    } catch (OverloadedException e) {
        // Fix: attach the exception to the log entry so the overload condition
        // is diagnosable; previously the throwable was silently discarded.
        Tracing.logger.warn("Too many nodes are overloaded to save trace events", e);
    }
}
/**
 * Runs any configured triggers over {@code mutations}, determines whether the
 * batch touches a materialized view, and applies the writes — atomically when
 * triggers added mutations, when the caller asked for it, or when a view is
 * affected.
 *
 * @param queryStartNanoTime start of the query, for timeout accounting
 */
@SuppressWarnings("unchecked")
public static void mutateWithTriggers(Collection<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, boolean mutateAtomically, long queryStartNanoTime) throws WriteTimeoutException, WriteFailureException, UnavailableException, OverloadedException, InvalidRequestException {
    // Fix: an empty batch is a no-op; without this guard the
    // mutations.iterator().next() below throws NoSuchElementException.
    if (mutations.isEmpty())
        return;

    Collection<Mutation> augmented = TriggerExecutor.instance.execute(mutations);

    // All mutations in one call share a keyspace, so the first one is
    // representative for the view check.
    boolean updatesView = Keyspace.open(mutations.iterator().next().getKeyspaceName())
                                  .viewManager
                                  .updatesAffectView(mutations, true);

    if (augmented != null)
        // Trigger output must land together with the originals.
        mutateAtomically(augmented, consistencyLevel, updatesView, queryStartNanoTime);
    else
    {
        if (mutateAtomically || updatesView)
            mutateAtomically((Collection<Mutation>) mutations, consistencyLevel, updatesView, queryStartNanoTime);
        else
            mutate(mutations, consistencyLevel, queryStartNanoTime);
    }
}
/**
 * Saves one trace mutation at {@code ConsistencyLevel.ANY}, swallowing
 * overload errors: trace persistence is best-effort and must not disturb
 * the operation being traced.
 */
static void mutateWithCatch(Mutation mutation) {
    final long queryStartNanos = System.nanoTime();
    try {
        StorageProxy.mutate(Collections.singletonList(mutation), ConsistencyLevel.ANY, queryStartNanos);
    } catch (OverloadedException overloaded) {
        // Drop the event rather than propagate the overload to the caller.
        Tracing.logger.warn("Too many nodes are overloaded to save trace events");
    }
}
/**
 * Executes triggers for the batch, checks whether any materialized view is
 * affected, and dispatches to the atomic or plain write path accordingly.
 * Atomic application is used when triggers generated extra mutations, when
 * the caller requested it, or when a view is updated.
 */
@SuppressWarnings("unchecked")
public static void mutateWithTriggers(Collection<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, boolean mutateAtomically, long queryStartNanoTime) throws WriteTimeoutException, WriteFailureException, UnavailableException, OverloadedException, InvalidRequestException {
    Collection<Mutation> triggerOutput = TriggerExecutor.instance.execute(mutations);

    // The first mutation's keyspace stands in for the whole batch.
    String keyspaceName = mutations.iterator().next().getKeyspaceName();
    boolean affectsView = Keyspace.open(keyspaceName)
                                  .viewManager
                                  .updatesAffectView(mutations, true);

    if (triggerOutput != null) {
        // Combined trigger output must be written as one atomic unit.
        mutateAtomically(triggerOutput, consistencyLevel, affectsView, queryStartNanoTime);
        return;
    }
    if (mutateAtomically || affectsView) {
        mutateAtomically((Collection<Mutation>) mutations, consistencyLevel, affectsView, queryStartNanoTime);
        return;
    }
    mutate(mutations, consistencyLevel, queryStartNanoTime);
}
/**
 * Best-effort persistence of a single trace event. An overloaded cluster is
 * logged and ignored so tracing never fails the traced request.
 */
static void mutateWithCatch(Mutation mutation) {
    try {
        StorageProxy.mutate(Collections.singletonList(mutation),
                            ConsistencyLevel.ANY,
                            System.nanoTime());
    } catch (OverloadedException ignored) {
        // Intentionally swallowed: tracing is non-critical.
        Tracing.logger.warn("Too many nodes are overloaded to save trace events");
    }
}
/**
 * Runs triggers over {@code mutations} and applies the writes, choosing the
 * atomic path when triggers produced extra mutations, when the caller asked
 * for atomicity, or when the batch updates a materialized view.
 *
 * NOTE(review): mutations.iterator().next() assumes a non-empty collection —
 * an empty batch would throw NoSuchElementException; confirm callers never
 * pass one.
 *
 * @param queryStartNanoTime start of the query, for timeout accounting
 */
@SuppressWarnings("unchecked")
public static void mutateWithTriggers(Collection<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, boolean mutateAtomically, long queryStartNanoTime) throws WriteTimeoutException, WriteFailureException, UnavailableException, OverloadedException, InvalidRequestException {
    Collection<Mutation> augmented = TriggerExecutor.instance.execute(mutations);
    // The first mutation's keyspace is used for the view-impact check.
    boolean updatesView = Keyspace.open(mutations.iterator().next().getKeyspaceName())
                                  .viewManager
                                  .updatesAffectView(mutations, true);
    if (augmented != null)
        // Trigger-generated mutations must land together with the originals.
        mutateAtomically(augmented, consistencyLevel, updatesView, queryStartNanoTime);
    else
    {
        if (mutateAtomically || updatesView)
            mutateAtomically((Collection<Mutation>) mutations, consistencyLevel, updatesView, queryStartNanoTime);
        else
            mutate(mutations, consistencyLevel, queryStartNanoTime);
    }
}
static void mutateWithCatch(Mutation mutation) { try { StorageProxy.mutate(Arrays.asList(mutation), ConsistencyLevel.ANY); } catch (UnavailableException | WriteTimeoutException e) { // should never happen; ANY does not throw UAE or WTE throw new AssertionError(e); } catch (OverloadedException e) { logger.warn("Too many nodes are overloaded to save trace events"); } } }
/**
 * Applies the given row mutations at the requested consistency level, using
 * an atomic batch when {@code atomicBatch} is set.
 *
 * A scheduling slot is acquired (bounded by the RPC timeout) before the write
 * and released afterwards; execution and timeout failures are wrapped as
 * retriable {@link TemporaryBackendException}s.
 *
 * @param cmds row mutations to apply
 * @param clvl consistency level for the write
 * @throws BackendException if the write fails or scheduling times out
 */
private void mutate(List<RowMutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException {
    try {
        // May throw TimeoutException — handled by the outer catch below.
        schedule(DatabaseDescriptor.getRpcTimeout());
        try {
            if (atomicBatch) {
                StorageProxy.mutateAtomically(cmds, clvl);
            } else {
                StorageProxy.mutate(cmds, clvl);
            }
        } catch (RequestExecutionException e) {
            // Cluster-side failure: surface as retriable.
            throw new TemporaryBackendException(e);
        } finally {
            // Balanced with the successful schedule() above.
            release();
        }
    } catch (TimeoutException ex) {
        log.debug("Cassandra TimeoutException", ex);
        throw new TemporaryBackendException(ex);
    }
}
/**
 * Writes every (column, value) pair in {@code nv} to the row identified by
 * {@code key} as a single mutation at the configured write consistency.
 *
 * @param key row key, serialized via {@code kser}
 * @param nv  column-name → value map; names via {@code colser}, values via {@code valser}
 * @return an empty {@code ResponseData} on success
 * @throws OperationException wrapping any failure from the write path
 */
@Override
public ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException {
    ByteBuffer rKey = kser.toByteBuffer(key);
    RowMutation change = new RowMutation(ks, rKey);
    // Fix: hoist the timestamp out of the loop so all columns in one batch
    // share a single write timestamp instead of drifting per iteration.
    long timestamp = System.currentTimeMillis();
    // Fix: wildcard entry type instead of the raw Map.Entry (unchecked).
    for (Map.Entry<?, ?> entry : nv.entrySet()) {
        ByteBuffer name = colser.toByteBuffer(entry.getKey());
        ByteBuffer val = valser.toByteBuffer(entry.getValue());
        ColumnPath cp = new ColumnPath(cf).setColumn(name);
        change.add(new QueryPath(cp), val, timestamp);
    }
    try {
        StorageProxy.mutate(Arrays.asList(change), wConsistecy);
    } catch (Exception e) {
        // Boundary: translate any write failure into the API's exception type.
        throw new OperationException(e);
    }
    return new ResponseData("", 0, "");
}
/**
 * Applies the mutations at the given consistency level — atomically when the
 * store is configured for atomic batches. Acquires a scheduling slot bounded
 * by the RPC timeout first and always releases it; all failures surface as
 * retriable {@link TemporaryBackendException}s.
 *
 * @param cmds mutations to apply
 * @param clvl consistency level for the write
 * @throws BackendException on execution failure or scheduling timeout
 */
private void mutate(List<org.apache.cassandra.db.Mutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException {
    try {
        schedule(DatabaseDescriptor.getRpcTimeout());
        try {
            if (atomicBatch)
                StorageProxy.mutateAtomically(cmds, clvl);
            else
                StorageProxy.mutate(cmds, clvl);
        } catch (RequestExecutionException failure) {
            throw new TemporaryBackendException(failure);
        } finally {
            // Pairs with the successful schedule() call above.
            release();
        }
    } catch (TimeoutException timedOut) {
        log.debug("Cassandra TimeoutException", timedOut);
        throw new TemporaryBackendException(timedOut);
    }
}
/**
 * Writes a single column/value to the row identified by {@code key} at the
 * configured write consistency.
 *
 * @param key     row key, serialized via {@code kser}
 * @param colName column name, serialized via {@code colser}
 * @param value   column value, serialized via {@code valser}
 * @return an empty {@code ResponseData} on success
 * @throws OperationException wrapping any failure from the write path
 */
@Override
public ResponseData put(Object key, Object colName, Object value) throws OperationException {
    ByteBuffer rowKey = kser.toByteBuffer(key);
    ByteBuffer columnName = colser.toByteBuffer(colName);
    ByteBuffer columnValue = valser.toByteBuffer(value);

    RowMutation change = new RowMutation(ks, rowKey);
    ColumnPath path = new ColumnPath(cf).setColumn(columnName);
    change.add(new QueryPath(path), columnValue, System.currentTimeMillis());

    try {
        StorageProxy.mutate(Arrays.asList(change), wConsistecy);
    } catch (Exception problem) {
        // Translate any write failure into the API's exception type.
        throw new OperationException(problem);
    }
    return new ResponseData("", 0, "");
}