private CLevel() { db = org.apache.cassandra.db.ConsistencyLevel.valueOf(toString()); thrift = org.apache.cassandra.thrift.ConsistencyLevel.valueOf(toString()); astyanax = com.netflix.astyanax.model.ConsistencyLevel.valueOf("CL_" + toString()); }
/**
 * Converts an {@link UnavailableException} into an unchecked exception. When the
 * configured consistency level is ALL, a more descriptive
 * {@link InsufficientConsistencyException} is raised instead, since ALL requires
 * every Cassandra node to be reachable.
 *
 * NOTE: despite the declared return type, every path throws; the return type only
 * lets callers write {@code throw wrapIfConsistencyAll(ex);} to satisfy the compiler.
 */
private RuntimeException wrapIfConsistencyAll(UnavailableException ex) {
    if (consistency.equals(ConsistencyLevel.ALL)) {
        throw new InsufficientConsistencyException(
                "This operation requires all Cassandra nodes to be up and available.", ex);
    }
    throw Throwables.throwUncheckedException(ex);
}
@Override public <T> T execute(final String query, Object connection) { Session session = factory.getConnection(); try { Statement queryStmt = new SimpleStatement(query); KunderaCoreUtils.printQuery(query, showQuery); queryStmt.setConsistencyLevel(ConsistencyLevel.valueOf(this.consistencyLevel.name())); return (T) session.execute(queryStmt); } catch (Exception e) { log.error("Error while executing query {}.", query); throw new KunderaException(e); } finally { // factory.releaseConnection(session); } }
// Thrift-generated StandardScheme deserializer for MultiSliceRequest (fields:
// key, column_parent, ..., consistency_level).
// NOTE(review): this chunk is truncated/garbled — `case` labels appear without
// their enclosing switch, brace pairing is unbalanced, and the inline `// KEY`
// comment swallows the rest of the physical line. Recover the full generated
// read() from the Thrift output before editing; do not hand-modify this text.
public void read(org.apache.thrift.protocol.TProtocol iprot, MultiSliceRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) schemeField = iprot.readFieldBegin(); if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { break; case 1: // KEY if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.key = iprot.readBinary(); struct.setKeyIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); struct.column_parent = new ColumnParent(); struct.column_parent.read(iprot); struct.setColumn_parentIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); case 6: // CONSISTENCY_LEVEL if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); struct.setConsistency_levelIsSet(true); } else {
// Thrift-generated StandardScheme serializer for MultiSliceRequest: validates,
// then writes each field (key, column_parent, consistency_level) only when set.
// NOTE(review): the chunk is truncated — closing braces, the remaining fields,
// and writeFieldStop()/writeStructEnd() are missing. Regenerate rather than
// completing by hand.
public void write(org.apache.thrift.protocol.TProtocol oprot, MultiSliceRequest struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.key != null) { if (struct.isSetKey()) { oprot.writeFieldBegin(KEY_FIELD_DESC); oprot.writeBinary(struct.key); oprot.writeFieldEnd(); if (struct.isSetColumn_parent()) { oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC); struct.column_parent.write(oprot); oprot.writeFieldEnd(); if (struct.isSetConsistency_level()) { oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC); oprot.writeI32(struct.consistency_level.getValue()); oprot.writeFieldEnd();
// Thrift-generated TupleScheme deserializer for MultiSliceRequest: reads a
// 6-field presence bitset, then each present field in order (key,
// column_parent, column_slices list, ..., consistency_level).
// NOTE(review): truncated — the `if (incoming.get(i))` guards for fields after
// key, the list-element read loop body, and closing braces are missing.
@Override public void read(org.apache.thrift.protocol.TProtocol prot, MultiSliceRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.key = iprot.readBinary(); struct.setKeyIsSet(true); struct.column_parent = new ColumnParent(); struct.column_parent.read(iprot); struct.setColumn_parentIsSet(true); org.apache.thrift.protocol.TList _list221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); struct.column_slices = new ArrayList<ColumnSlice>(_list221.size); for (int _i222 = 0; _i222 < _list221.size; ++_i222) struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); struct.setConsistency_levelIsSet(true);
// Fragment of the Thrift-generated TupleScheme serializer for MultiSliceRequest:
// marks field 6 (consistency_level) present, writes the 6-bit presence set, then
// the set fields. NOTE(review): truncated — the isSet guards for fields other
// than key and the loop body writing each ColumnSlice are missing.
optionals.set(5); oprot.writeBitSet(optionals, 6); if (struct.isSetKey()) { oprot.writeBinary(struct.key); struct.column_parent.write(oprot); oprot.writeI32(struct.column_slices.size()); for (ColumnSlice _iter220 : struct.column_slices) oprot.writeI32(struct.consistency_level.getValue());
// Builds a SlicePredicate: an unbounded slice range (up to 1,000,000 columns)
// when no field list is given, otherwise an explicit column-name list.
// NOTE(review): truncated — the `else` introducing the second assignment is
// missing, so as written the second assignment would clobber the first.
// NOTE(review): the System.out debug prints should go through the logger.
SlicePredicate predicate; if (fields == null) { predicate = new SlicePredicate().setSlice_range(new SliceRange( EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, 1000000)); predicate = new SlicePredicate().setColumn_names(fieldlist); System.out.println(); System.out .println("ConsistencyLevel=" + readConsistencyLevel.toString());
// Fragment of CassandraServer.get_paged_slice: records trace parameters,
// validates the consistency level for reads, builds an unbounded (count = -1)
// slice predicate from start_column, and thriftifies the resulting key slices.
// NOTE(review): truncated — the surrounding method signature, the read that
// produces `rows`, and exception handling are not in this chunk.
"range", range.toString(), "start_column", ByteBufferUtil.bytesToHex(start_column), "consistency_level", consistency_level.name()); Tracing.instance.begin("get_paged_slice", traceParameters); consistencyLevel.validateForRead(keyspace); SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange(start_column, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, -1)); return thriftifyKeySlices(rows, new ColumnParent(column_family), predicate, now);
// Fragment of CassandraServer.get_count: records trace parameters, then builds
// the clustering filter / limits used to count cells under one key. The
// super-column special case (isSuper && no super_column set) alters the limits.
// NOTE(review): truncated — the branch body after the isSuper check and the
// actual count execution are missing from this chunk.
"column_parent", column_parent.toString(), "predicate", predicate.toString(), "consistency_level", consistency_level.name()); Tracing.instance.begin("get_count", traceParameters); ClusteringIndexFilter filter; CFMetaData metadata = cfs.metadata; if (metadata.isSuper() && !column_parent.isSetSuper_column()) DataLimits limits = getLimits(1, metadata.isSuper() && !column_parent.isSetSuper_column(), predicate); DecoratedKey dk = metadata.decorateKey(key);
// Client-init fragment: resolves per-operation consistency levels (read/write/
// scan/delete) from properties, each falling back to its own default constant.
// NOTE(review): garbled — the first `.valueOf(...)` has lost its left-hand side
// (presumably `readConsistencyLevel = ConsistencyLevel`); restore it from the
// original source before editing.
parent = new ColumnParent(columnFamily); .valueOf(getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel .valueOf(getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); scanConsistencyLevel = ConsistencyLevel .valueOf(getProperties().getProperty(SCAN_CONSISTENCY_LEVEL_PROPERTY, SCAN_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); deleteConsistencyLevel = ConsistencyLevel .valueOf(getProperties().getProperty(DELETE_CONSISTENCY_LEVEL_PROPERTY, DELETE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
// Fragment of a compare-and-set (CAS) handler: assembles trace parameters
// (expected/updated columns plus both consistency levels), then validates the
// updated column names and data against the table metadata.
// NOTE(review): truncated — the CAS execution and surrounding method are not
// part of this chunk.
builder.put("old", expected.toString()); builder.put("updates", updates.toString()); builder.put("consistency_level", commit_consistency_level.name()); builder.put("serial_consistency_level", serial_consistency_level.name()); Map<String,String> traceParameters = builder.build(); ThriftValidation.validateColumnNames(metadata, new ColumnParent(column_family), names); for (Column column : updates) ThriftValidation.validateColumnData(metadata, null, column);
/**
 * Thrift endpoint: inserts a single column under the given key/parent at the
 * requested consistency level. When client-requested tracing is active, the
 * request parameters are attached to the trace session; the session is always
 * stopped in the finally block, even on failure.
 *
 * @throws InvalidRequestException if validation rejects the request
 * @throws UnavailableException    if too few replicas are alive
 * @throws TimedOutException       if replicas do not respond in time
 */
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException {
    final long queryStartNanoTime = System.nanoTime();
    final boolean traced = startSessionIfRequested();
    if (traced) {
        Tracing.instance.begin("insert", ImmutableMap.of(
                "key", ByteBufferUtil.bytesToHex(key),
                "column_parent", column_parent.toString(),
                "column", column.toString(),
                "consistency_level", consistency_level.name()));
    } else {
        logger.trace("insert");
    }
    try {
        internal_insert(key, column_parent, column, consistency_level, queryStartNanoTime);
    } catch (RequestValidationException e) {
        // Surface validation failures to the Thrift client in its own exception type.
        throw ThriftConversion.toThrift(e);
    } finally {
        Tracing.instance.stopSession();
    }
}
// Fragment of CassandraServer.multi_slice: records trace parameters, builds a
// SliceQueryFilter from the de-overlapped column slices, validates the key,
// creates one ReadCommand, and returns the single row's column list.
// NOTE(review): truncated — the deoverlap step, the commands list creation, and
// the enclosing method are not in this chunk.
"column_parent", request.column_parent.toString(), "consistency_level", request.consistency_level.name(), "count", String.valueOf(request.count), "column_slices", request.column_slices.toString()); SliceQueryFilter filter = new SliceQueryFilter(deoverlapped, request.reversed, request.count); ThriftValidation.validateKey(metadata, request.key); commands.add(ReadCommand.create(keyspace, request.key, request.column_parent.getColumn_family(), System.currentTimeMillis(), filter)); return getSlice(commands, request.column_parent.isSetSuper_column(), consistencyLevel, cState).entrySet().iterator().next().getValue();
// Connection-setup fragment: stores the negotiated CQL version, derives its
// major number, resolves the default consistency level from properties
// (falling back to ONE), and logs the established connection parameters.
// NOTE(review): fragment of a larger method — the host/port/cluster/keyspace
// variables it logs are defined outside this chunk.
connectionProps.setProperty(TAG_ACTIVE_CQL_VERSION, version); majorCqlVersion = getMajor(version); defaultConsistencyLevel = ConsistencyLevel.valueOf(props.getProperty(TAG_CONSISTENCY_LEVEL,ConsistencyLevel.ONE.name())); Object[] args = {host, port,currentKeyspace,cluster,version, defaultConsistencyLevel.name()}; logger.debug("Connected to {}:{} in Cluster '{}' using Keyspace '{}', CQL version '{}' and Consistency level {}",args);
/**
 * Delegates a multiget_slice to the wrapped client, surrounding the call with
 * profiling: the request shape (table, row/column counts, consistency) and
 * duration are logged, and a second logger records the size of the result.
 */
@Override
public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(String kvsMethodName,
        TableReference tableRef,
        List<ByteBuffer> keys,
        SlicePredicate predicate,
        ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException, TException {
    final int rowCount = keys.size();
    final int columnCount = predicate.slice_range.count;
    final long startMillis = System.currentTimeMillis();
    return KvsProfilingLogger.maybeLog(
            (KvsProfilingLogger.CallableCheckedException<Map<ByteBuffer, List<ColumnOrSuperColumn>>, TException>)
                    () -> client.multiget_slice(kvsMethodName, tableRef, keys, predicate, consistency_level),
            (logger, timer) -> logger.log(
                    "CassandraClient.multiget_slice({}, {}, {}, {}) at time {}, on kvs.{} took {} ms",
                    LoggingArgs.tableRef(tableRef),
                    LoggingArgs.rowCount(rowCount),
                    LoggingArgs.columnCount(columnCount),
                    SafeArg.of("consistency", consistency_level.toString()),
                    LoggingArgs.startTimeMillis(startMillis),
                    SafeArg.of("kvsMethodName", kvsMethodName),
                    LoggingArgs.durationMillis(timer)),
            (logger, rowsToColumns) -> logger.log(
                    "and returned {} cells in {} rows with {} bytes",
                    LoggingArgs.cellCount(rowsToColumns.values().stream().mapToInt(List::size).sum()),
                    LoggingArgs.rowCount(rowsToColumns.size()),
                    LoggingArgs.sizeInBytes(ThriftObjectSizeUtils.getApproximateSizeOfColsByKey(rowsToColumns))));
}
// Fragment of a Thrift-generated equals(): field-by-field comparison for
// column_parent and consistency_level with presence flags.
// NOTE(review): truncated — the generated code normally guards each pair with
// `if (this_present_x || that_present_x) { ... }` before these checks; that
// outer guard (and the other fields) is missing from this chunk.
if (!(this_present_column_parent && that_present_column_parent)) return false; if (!this.column_parent.equals(that.column_parent)) return false; if (!(this_present_consistency_level && that_present_consistency_level)) return false; if (!this.consistency_level.equals(that.consistency_level)) return false;
/**
 * Hash code over all optional fields of the request. For each field, the
 * presence flag itself is folded in, and the value only when present, so unset
 * and set-to-default fields hash differently.
 *
 * NOTE: the append order (key, column_parent, column_slices, reversed, count,
 * consistency_level) is part of the hash and must not be reordered.
 */
@Override
public int hashCode() {
    HashCodeBuilder builder = new HashCodeBuilder();

    boolean hasKey = isSetKey();
    builder.append(hasKey);
    if (hasKey) {
        builder.append(key);
    }

    boolean hasColumnParent = isSetColumn_parent();
    builder.append(hasColumnParent);
    if (hasColumnParent) {
        builder.append(column_parent);
    }

    boolean hasColumnSlices = isSetColumn_slices();
    builder.append(hasColumnSlices);
    if (hasColumnSlices) {
        builder.append(column_slices);
    }

    boolean hasReversed = isSetReversed();
    builder.append(hasReversed);
    if (hasReversed) {
        builder.append(reversed);
    }

    boolean hasCount = isSetCount();
    builder.append(hasCount);
    if (hasCount) {
        builder.append(count);
    }

    boolean hasConsistencyLevel = isSetConsistency_level();
    builder.append(hasConsistencyLevel);
    if (hasConsistencyLevel) {
        // Enums are hashed by their wire value, not identity.
        builder.append(consistency_level.getValue());
    }

    return builder.toHashCode();
}
// NOTE(review): byte-identical duplicate of the fragment at the top of this
// file (Thrift-generated StandardScheme read for MultiSliceRequest). It is
// truncated/garbled in the same way — case labels without a switch, unbalanced
// braces, inline `// KEY` comment swallowing trailing code. De-duplicate and
// regenerate from the Thrift IDL instead of editing either copy by hand.
public void read(org.apache.thrift.protocol.TProtocol iprot, MultiSliceRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) schemeField = iprot.readFieldBegin(); if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { break; case 1: // KEY if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.key = iprot.readBinary(); struct.setKeyIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); struct.column_parent = new ColumnParent(); struct.column_parent.read(iprot); struct.setColumn_parentIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); case 6: // CONSISTENCY_LEVEL if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32()); struct.setConsistency_levelIsSet(true); } else {