/**
 * Private constructor.
 * Convenience overload for a single-cell append: wraps the lone
 * qualifier/value pair into one-element arrays and delegates to the
 * batch constructor, passing {@code false} for the trailing
 * bufferable flag.
 */
private AppendRequest(final byte[] table,
                      final byte[] key,
                      final byte[] family,
                      final byte[] qualifier,
                      final byte[] value,
                      final long timestamp,
                      final long lockid) {
  this(table, key, family, new byte[][] { qualifier },
       new byte[][] { value }, timestamp, lockid, false);
}
super(table, key, family == null ? WHOLE_ROW : family, timestamp, lockid); if (family != null) { KeyValue.checkFamily(family); KeyValue.checkQualifier(qualifier);
/**
 * Generates a tree structure that ElasticSearch can read and index from
 * one of the rows that has been returned from HBase.
 *
 * <p>Each cell's family becomes a top-level key; the qualifier is further
 * decomposed by {@code readQualifierStructure} inside the family's map.
 *
 * @param row the cells making up one HBase row
 * @return a nested map of family name to qualifier tree
 */
@SuppressWarnings("unchecked")
protected Map<String, Object> readDataTree(final ArrayList<KeyValue> row) {
  final Map<String, Object> dataTree = new HashMap<String, Object>();
  for (final KeyValue cell : row) {
    final String family = this.river.normalizeField(
        new String(cell.family(), this.river.getCharset()));
    // Get-or-create the per-family sub tree.
    Map<String, Object> familyTree = (Map<String, Object>) dataTree.get(family);
    if (familyTree == null) {
      familyTree = new HashMap<String, Object>();
      dataTree.put(family, familyTree);
    }
    readQualifierStructure(familyTree,
                           new String(cell.qualifier(), this.river.getCharset()),
                           new String(cell.value(), this.river.getCharset()));
  }
  return dataTree;
}
/**
 * Handles the response of a scanner RPC.
 * Normalizes the two wire formats (0.95+ {@code Response} vs the bare
 * {@code ArrayList} used by 0.94 and before) into a list of rows, records
 * scanner state, and remembers the key of the last row seen so the next
 * scan can resume from there.
 *
 * <p>NOTE(review): assumes a non-empty batch when {@code rows != null};
 * an empty list would make {@code rows.get(rows.size() - 1)} throw —
 * confirm the server never returns an empty, non-null batch.
 */
public Object call(final Object response) {
  ArrayList<ArrayList<KeyValue>> rows = null;
  Response resp = null;
  if (response instanceof Response) {  // HBase 0.95 and up.
    resp = (Response) response;
    rows = resp.rows;
    scannerClosedOnServer = resp.scannerClosedOnServer;
    moreRows = resp.more;
  } else if (response instanceof ArrayList) {  // HBase 0.94 and before.
    @SuppressWarnings("unchecked")  // Pre-0.95 responses are untyped lists.
    final ArrayList<ArrayList<KeyValue>> r = (ArrayList<ArrayList<KeyValue>>) response;
    rows = r;
  } else if (response != null) {
    throw new InvalidResponseException(ArrayList.class, response);
  }
  if (rows == null) {
    // We're done scanning this region.
    return scanFinished(resp != null && !resp.more);
  }
  // Remember where to resume: the key of the first cell of the last row.
  final ArrayList<KeyValue> lastrow = rows.get(rows.size() - 1);
  start_key = lastrow.get(0).key();
  return rows;
}

public String toString() {
/**
 * Package private helper to access the last timestamp in an HBase row.
 *
 * <p>The base time is stored in the row key right after the metric ID;
 * the delta of the last data point lives in the top bits of the final
 * two qualifier bytes.
 *
 * @param metric_width The number of bytes on which metric IDs are stored.
 * @param row A compacted HBase row.
 * @return A strictly positive 32-bit timestamp.
 * @throws IllegalArgumentException if {@code row} doesn't contain any cell.
 */
static long lastTimestampInRow(final short metric_width, final KeyValue row) {
  final byte[] qualifier = row.qualifier();
  // Strip the flag bits to recover the seconds offset of the last point.
  final int last_delta =
      Bytes.getUnsignedShort(qualifier, qualifier.length - 2) >>> Const.FLAG_BITS;
  return Bytes.getUnsignedInt(row.key(), metric_width) + last_delta;
}
/** * Creates a new {@link RegionInfo} from a META {@link KeyValue}. * @param kv The {@link KeyValue} to use, which is assumed to be from * the cell {@code info:regioninfo} of a {@code .META.} region. * @param out_start_key A (@code {new byte[1][]}). * The start row of the region will be stored in {@code out_start_key[0]}. * Think "pointer-to-pointer" in Java (yeah!). * @return A newly created {@link RegionInfo}. * If calling {@link #table} on the object returned gives a reference to * {@link HBaseClient#EMPTY_ARRAY}, then the META entry indicates that the * region has been split (and thus this entry shouldn't be used). * @throws RegionOfflineException if the META entry indicates that the * region is offline. * @throws BrokenMetaException if the {@link KeyValue} seems invalid. */ static RegionInfo fromKeyValue(final KeyValue kv, final byte[][] out_start_key) { switch (kv.value()[0]) { case 0: // pre 0.92 -- fall through. case 1: // 0.92 to 0.94 return deserializeOldRegionInfo(kv, out_start_key); case 80: // 0.95+ return deserializeProtobufRegionInfo(kv, out_start_key); default: throw new IllegalStateException("Unsupported region info version: " + kv.value()[0] + " in .META. entry: " + kv); } }
/**
 * Runs a single cell through the complex compaction path.
 * The estimated number of data points is derived from the qualifier
 * length (the code divides by 2, i.e. two qualifier bytes per point).
 * @see CompactionQueue#complexCompact
 */
public static KeyValue complexCompact(final KeyValue kv) {
  final ArrayList<KeyValue> cells = new ArrayList<KeyValue>(1);
  cells.add(kv);
  return CompactionQueue.complexCompact(cells, kv.qualifier().length / 2);
}
invalid("rowkey_length=" + key_length + " doesn't match key_length + family_length (" + key_length + " + " + family_length + " +12) in " + buf + '=' invalid("2 + rl:" + key_length + " + 1 + fl:" + family_length + " + ql:" + qual_length + " + 8 + 1" + " != kl:" + rowkey_length); return new KeyValue(key, family, qualifier, timestamp, /*key_type,*/ value); } else { return new KeyValue(Bytes.deDup(prev.key, key), Bytes.deDup(prev.family, family), Bytes.deDup(prev.qualifier, qualifier),
/**
 * Transforms a protobuf Cell message into a KeyValue (HBase 0.95+).
 * @param cell The protobuf Cell to de-serialize.
 * @param prev Another {@link KeyValue} previously de-serialized from the
 * same buffer. Can be {@code null}. KeyValues often come in a sorted
 * batch sharing byte arrays (same row key and/or family...); when a
 * previous KeyValue is given, its arrays are re-used wherever the
 * contents match, trading a little CPU for lower memory usage (which in
 * turn saves CPU later).
 * @return a new instance (guaranteed non-{@code null}).
 */
static KeyValue fromCell(final CellPB.Cell cell, final KeyValue prev) {
  byte[] row = Bytes.get(cell.getRow());
  byte[] fam = Bytes.get(cell.getFamily());
  byte[] qual = Bytes.get(cell.getQualifier());
  final byte[] value = Bytes.get(cell.getValue());
  if (prev != null) {
    // De-duplicate against the previous cell's arrays where possible.
    row = Bytes.deDup(prev.key, row);
    fam = Bytes.deDup(prev.family, fam);
    qual = Bytes.deDup(prev.qualifier, qual);
  }
  return new KeyValue(row, fam, qual, cell.getTimestamp(), /*key_type,*/ value);
}
/**
 * Private constructor.
 * Convenience overload for a single-cell put: wraps the lone
 * qualifier/value pair into one-element arrays and delegates to the
 * batch constructor.
 */
private PutRequest(final byte[] table,
                   final byte[] key,
                   final byte[] family,
                   final byte[] qualifier,
                   final byte[] value,
                   final long timestamp,
                   final long lockid) {
  this(table, key, family, new byte[][] { qualifier },
       new byte[][] { value }, timestamp, lockid);
}
@Override Object deserialize(final ChannelBuffer buf, int cell_size) { final MutateResponse resp = readProtobuf(buf, MutateResponse.PARSER); // An increment must always produce a result, so we shouldn't need to // check whether the `result' field is set here. final ArrayList<KeyValue> kvs = GetRequest.convertResult(resp.getResult(), buf, cell_size); Map<byte[], Long> updatedValues = Maps.newHashMap(); for (KeyValue kv : kvs) { updatedValues.put(kv.qualifier(), Bytes.getLong(kv.value())); } return updatedValues; }
for (final ArrayList<KeyValue> row : rows) { buf.setLength(0); final byte[] key = row.get(0).key(); final long base_time = Internal.baseTime(tsdb, key); final String metric = Internal.metricName(tsdb, key);
@Override Object deserialize(final ChannelBuffer buf, int cell_size) { final MutateResponse resp = readProtobuf(buf, MutateResponse.PARSER); // An increment must always produce a result, so we shouldn't need to // check whether the `result' field is set here. final ArrayList<KeyValue> kvs = GetRequest.convertResult(resp.getResult(), buf, cell_size); if (kvs.size() != 1) { throw new InvalidResponseException("Atomic increment returned " + kvs.size() + " KeyValue(s), but we expected exactly one. kvs=" + kvs, resp); } return Bytes.getLong(kvs.get(0).value()); }