/** Returns a defensive copy of the cell's value so callers never alias the backing buffer. */
@Override
public ByteBuffer getValue(Cell element)
{
    ByteBuffer raw = element.value();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(raw);
}
/** Returns a defensive copy of the cell's name, serialized to a ByteBuffer. */
@Override
public ByteBuffer getColumn(Cell element)
{
    ByteBuffer name = element.name().toByteBuffer();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(name);
}
/** Returns a defensive copy of the cell's value so callers never alias the backing buffer. */
@Override
public ByteBuffer getValue(Cell element)
{
    ByteBuffer raw = element.value();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(raw);
}
/** Returns a defensive copy of the column's value so callers never alias the backing buffer. */
@Override
public ByteBuffer getValue(Column element)
{
    ByteBuffer raw = element.value();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(raw);
}
/** Returns a defensive copy of the column's name buffer. */
@Override
public ByteBuffer getColumn(Column element)
{
    ByteBuffer name = element.name();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(name);
}
private static Object toJsonValue(Object o) { if (o instanceof UUID) return o.toString(); if (o instanceof Date) return ((Date)o).getTime(); if (o instanceof ByteBuffer) { // encode byte[] as Base64 encoded string ByteBuffer bb = ByteBufferUtil.clone((ByteBuffer)o); return Base64.getEncoder().encodeToString(ByteBufferUtil.getArray((ByteBuffer)o)); } if (o instanceof InetAddress) return InetAddresses.toAddrString((InetAddress)o); return o; }
/**
 * Returns a copy of the raw bytes of the given column's value, or null when
 * the stored value is null. Updates {@code wasNull} accordingly, as required
 * by the JDBC accessor contract.
 *
 * @throws SQLException if the statement/result set has been closed
 */
private byte[] getBytes(TypedColumn column) throws SQLException
{
    checkNotClosed();
    ByteBuffer raw = (ByteBuffer) column.getRawColumn().value;
    wasNull = (raw == null);
    if (raw == null)
        return null;
    // clone() yields an exact-size heap buffer, so array() is precisely the content.
    return ByteBufferUtil.clone(raw).array();
}
/** Returns a defensive copy of the cell's name, serialized to a ByteBuffer. */
@Override
public ByteBuffer getColumn(Cell element)
{
    ByteBuffer name = element.name().toByteBuffer();
    return org.apache.cassandra.utils.ByteBufferUtil.clone(name);
}
/**
 * Checks whether we have started to read a new CF row by comparing the
 * partition key of the current row against the previously seen one.
 *
 * Side effect: updates {@code this.previousRowKey} whenever a new row is detected.
 *
 * @param keyColumns     the partition key columns of the row just read
 * @param previousRowKey the key string of the last row seen, or null if none yet
 * @return true if this row belongs to a new partition, false otherwise
 */
private boolean newRow(Map<String, ByteBuffer> keyColumns, String previousRowKey)
{
    if (keyColumns.isEmpty())
        return false;

    String rowKey;
    if (keyColumns.size() == 1)
    {
        rowKey = partitionBoundColumns.get(0).validator.getString(keyColumns.get(partitionBoundColumns.get(0).name));
    }
    else
    {
        // Composite partition key: join the component strings with ':'.
        // Use a StringBuilder rather than repeated String concatenation in the
        // loop (the original built a fresh String per component).
        StringBuilder sb = new StringBuilder();
        Iterator<ByteBuffer> iter = keyColumns.values().iterator();
        for (BoundColumn column : partitionBoundColumns)
            sb.append(column.validator.getString(ByteBufferUtil.clone(iter.next()))).append(':');
        rowKey = sb.toString();
    }

    logger.debug("previous RowKey: {}, new row key: {}", previousRowKey, rowKey);

    // First row ever seen: remember it and report a new row.
    if (previousRowKey == null)
    {
        this.previousRowKey = rowKey;
        return true;
    }

    if (rowKey.equals(previousRowKey))
        return false;

    this.previousRowKey = rowKey;
    return true;
}
private List<ReadCommand> getSliceCommands(QueryOptions options, int limit, long now) throws RequestValidationException { Collection<ByteBuffer> keys = getKeys(options); if (keys.isEmpty()) // in case of IN () for (the last column of) the partition key. return null; List<ReadCommand> commands = new ArrayList<>(keys.size()); IDiskAtomFilter filter = makeFilter(options, limit); if (filter == null) return null; // Note that we use the total limit for every key, which is potentially inefficient. // However, IN + LIMIT is not a very sensible choice. for (ByteBuffer key : keys) { QueryProcessor.validateKey(key); // We should not share the slice filter amongst the commands (hence the cloneShallow), due to // SliceQueryFilter not being immutable due to its columnCounter used by the lastCounted() method // (this is fairly ugly and we should change that but that's probably not a tiny refactor to do that cleanly) commands.add(ReadCommand.create(keyspace(), ByteBufferUtil.clone(key), columnFamily(), now, filter.cloneShallow())); } return commands; }
private ReadQuery getSliceCommands(QueryOptions options, DataLimits limit, int nowInSec) throws RequestValidationException { Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options); if (keys.isEmpty()) return ReadQuery.EMPTY; ClusteringIndexFilter filter = makeClusteringIndexFilter(options); if (filter == null) return ReadQuery.EMPTY; RowFilter rowFilter = getRowFilter(options); // Note that we use the total limit for every key, which is potentially inefficient. // However, IN + LIMIT is not a very sensible choice. List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size()); for (ByteBuffer key : keys) { QueryProcessor.validateKey(key); DecoratedKey dk = cfm.decorateKey(ByteBufferUtil.clone(key)); ColumnFilter cf = (cfm.isSuper() && cfm.isDense()) ? SuperColumnCompatibility.getColumnFilter(cfm, options, restrictions.getSuperColumnRestrictions()) : queriedColumns; commands.add(SinglePartitionReadCommand.create(cfm, nowInSec, cf, rowFilter, limit, dk, filter)); } return new SinglePartitionReadCommand.Group(commands, limit); }
private ReadQuery getSliceCommands(QueryOptions options, DataLimits limit, int nowInSec) throws RequestValidationException { Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options); if (keys.isEmpty()) return ReadQuery.EMPTY; ClusteringIndexFilter filter = makeClusteringIndexFilter(options); if (filter == null) return ReadQuery.EMPTY; RowFilter rowFilter = getRowFilter(options); // Note that we use the total limit for every key, which is potentially inefficient. // However, IN + LIMIT is not a very sensible choice. List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size()); for (ByteBuffer key : keys) { QueryProcessor.validateKey(key); DecoratedKey dk = cfm.decorateKey(ByteBufferUtil.clone(key)); ColumnFilter cf = (cfm.isSuper() && cfm.isDense()) ? SuperColumnCompatibility.getColumnFilter(cfm, options, restrictions.getSuperColumnRestrictions()) : queriedColumns; commands.add(SinglePartitionReadCommand.create(cfm, nowInSec, cf, rowFilter, limit, dk, filter)); } return new SinglePartitionReadCommand.Group(commands, limit); }
private ReadQuery getSliceCommands(QueryOptions options, DataLimits limit, int nowInSec) throws RequestValidationException { Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options); if (keys.isEmpty()) return ReadQuery.EMPTY; ClusteringIndexFilter filter = makeClusteringIndexFilter(options); if (filter == null) return ReadQuery.EMPTY; RowFilter rowFilter = getRowFilter(options); // Note that we use the total limit for every key, which is potentially inefficient. // However, IN + LIMIT is not a very sensible choice. List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size()); for (ByteBuffer key : keys) { QueryProcessor.validateKey(key); DecoratedKey dk = cfm.decorateKey(ByteBufferUtil.clone(key)); ColumnFilter cf = (cfm.isSuper() && cfm.isDense()) ? SuperColumnCompatibility.getColumnFilter(cfm, options, restrictions.getSuperColumnRestrictions()) : queriedColumns; commands.add(SinglePartitionReadCommand.create(cfm, nowInSec, cf, rowFilter, limit, dk, filter)); } return new SinglePartitionReadCommand.Group(commands, limit); }
public static ColumnDefinition fromThrift(String ksName, String cfName, AbstractType<?> thriftComparator, AbstractType<?> thriftSubcomparator, ColumnDef thriftColumnDef) throws SyntaxException, ConfigurationException { boolean isSuper = thriftSubcomparator != null; // For super columns, the componentIndex is 1 because the ColumnDefinition applies to the column component. AbstractType<?> comparator = thriftSubcomparator == null ? thriftComparator : thriftSubcomparator; try { comparator.validate(thriftColumnDef.name); } catch (MarshalException e) { throw new ConfigurationException(String.format("Column name %s is not valid for comparator %s", ByteBufferUtil.bytesToHex(thriftColumnDef.name), comparator)); } // In our generic layout, we store thrift defined columns as static, but this doesn't work for super columns so we // use a regular definition (and "dynamic" columns are handled in a map). ColumnDefinition.Kind kind = isSuper ? ColumnDefinition.Kind.REGULAR : ColumnDefinition.Kind.STATIC; return new ColumnDefinition(ksName, cfName, ColumnIdentifier.getInterned(ByteBufferUtil.clone(thriftColumnDef.name), comparator), TypeParser.parse(thriftColumnDef.validation_class), ColumnDefinition.NO_POSITION, kind); }
public static ColumnDefinition fromThrift(String ksName, String cfName, AbstractType<?> thriftComparator, AbstractType<?> thriftSubcomparator, ColumnDef thriftColumnDef) throws SyntaxException, ConfigurationException { boolean isSuper = thriftSubcomparator != null; // For super columns, the componentIndex is 1 because the ColumnDefinition applies to the column component. AbstractType<?> comparator = thriftSubcomparator == null ? thriftComparator : thriftSubcomparator; try { comparator.validate(thriftColumnDef.name); } catch (MarshalException e) { throw new ConfigurationException(String.format("Column name %s is not valid for comparator %s", ByteBufferUtil.bytesToHex(thriftColumnDef.name), comparator)); } // In our generic layout, we store thrift defined columns as static, but this doesn't work for super columns so we // use a regular definition (and "dynamic" columns are handled in a map). ColumnDefinition.Kind kind = isSuper ? ColumnDefinition.Kind.REGULAR : ColumnDefinition.Kind.STATIC; return new ColumnDefinition(ksName, cfName, ColumnIdentifier.getInterned(ByteBufferUtil.clone(thriftColumnDef.name), comparator), TypeParser.parse(thriftColumnDef.validation_class), ColumnDefinition.NO_POSITION, kind); }
// Copy the column's name buffer so the Thrift ColumnDef does not alias our internal state.
cd.setName(ByteBufferUtil.clone(column.name.bytes)); cd.setValidation_class(column.type.toString());
// Copy the column's name buffer so the Thrift ColumnDef does not alias our internal state.
cd.setName(ByteBufferUtil.clone(column.name.bytes)); cd.setValidation_class(column.type.toString());
// Skip anything that is not a "regular" column. Columns 0/1/2 of the result row
// appear to hold the column name, validator class and index type respectively —
// TODO(review): confirm this ordering against the query that produced `row`.
// The name is cloned so cDef does not alias the result row's buffer.
if (!type.equals("regular")) continue; cDef.setName(ByteBufferUtil.clone(row.getColumns().get(0).value)); cDef.validation_class = ByteBufferUtil.string(row.getColumns().get(1).value); ByteBuffer indexType = row.getColumns().get(2).value;
public static ColumnDefinition fromThrift(String ksName, String cfName, AbstractType<?> thriftComparator, AbstractType<?> thriftSubcomparator, ColumnDef thriftColumnDef) throws SyntaxException, ConfigurationException { // For super columns, the componentIndex is 1 because the ColumnDefinition applies to the column component. Integer componentIndex = thriftSubcomparator != null ? 1 : null; AbstractType<?> comparator = thriftSubcomparator == null ? thriftComparator : thriftSubcomparator; try { comparator.validate(thriftColumnDef.name); } catch (MarshalException e) { throw new ConfigurationException(String.format("Column name %s is not valid for comparator %s", ByteBufferUtil.bytesToHex(thriftColumnDef.name), comparator)); } return new ColumnDefinition(ksName, cfName, new ColumnIdentifier(ByteBufferUtil.clone(thriftColumnDef.name), comparator), TypeParser.parse(thriftColumnDef.validation_class), thriftColumnDef.index_type == null ? null : IndexType.valueOf(thriftColumnDef.index_type.name()), thriftColumnDef.index_options, thriftColumnDef.index_name, componentIndex, Kind.REGULAR); }
/**
 * Builds the Thrift ColumnDef representation of this column definition.
 * The name buffer is cloned (and the index options map copied) so the Thrift
 * object does not share mutable state with this definition.
 */
public ColumnDef toThrift()
{
    ColumnDef cd = new ColumnDef();
    cd.setName(ByteBufferUtil.clone(name.bytes));
    cd.setValidation_class(type.toString());

    if (indexType == null)
        cd.setIndex_type(null);
    else
        cd.setIndex_type(org.apache.cassandra.thrift.IndexType.valueOf(indexType.name()));

    // Passing indexName directly is equivalent to the original null-checked ternary.
    cd.setIndex_name(indexName);
    cd.setIndex_options(indexOptions == null ? null : Maps.newHashMap(indexOptions));
    return cd;
}