/**
 * Serializes this split's fields (table name, start/end row, region location)
 * to the given output, each as a vint-length-prefixed byte array.
 *
 * @param out destination for the serialized fields
 * @throws IOException if writing to the output fails
 */
public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.m_tableName.getName());
  Bytes.writeByteArray(out, this.m_startRow);
  Bytes.writeByteArray(out, this.m_endRow);
  byte[] locationBytes = Bytes.toBytes(this.m_regionLocation);
  Bytes.writeByteArray(out, locationBytes);
}
/**
 * Serializes this split to the given output: a version code first, then every
 * field as a vint-length-prefixed byte array (lengths written as vlong/vint).
 *
 * @param out destination for the serialized split
 * @throws IOException if writing any field fails
 */
@Override
public void write(DataOutput out) throws IOException {
  // Version code leads so readers can dispatch on format.
  WritableUtils.writeVInt(out, VERSION.code);
  Bytes.writeByteArray(out, tableName.getName());
  Bytes.writeByteArray(out, startRow);
  Bytes.writeByteArray(out, endRow);
  byte[] locationBytes = Bytes.toBytes(regionLocation);
  Bytes.writeByteArray(out, locationBytes);
  byte[] scanBytes = Bytes.toBytes(scan);
  Bytes.writeByteArray(out, scanBytes);
  WritableUtils.writeVLong(out, length);
  byte[] encodedNameBytes = Bytes.toBytes(encodedRegionName);
  Bytes.writeByteArray(out, encodedNameBytes);
}
@Override public void write(DataOutput out) throws IOException { super.write(out); // Explicitly writing null to maintain se/deserialize backward compatibility. Bytes.writeByteArray(out, table == null ? null : table.getName()); out.writeBoolean(family != null); if (family != null) { Bytes.writeByteArray(out, family); } out.writeBoolean(qualifier != null); if (qualifier != null) { Bytes.writeByteArray(out, qualifier); } } }
@Override
public void write(DataOutput out) throws IOException {
  // Parent fields first, then the namespace as a length-prefixed byte array.
  super.write(out);
  byte[] namespaceBytes = Bytes.toBytes(namespace);
  Bytes.writeByteArray(out, namespaceBytes);
}
}
/**
 * Writes this chunk into the given output stream in the root block index
 * format. This format is similar to the {@link HFile} version 1 block
 * index format, except that the on-disk size of the block is stored instead
 * of its uncompressed size.
 *
 * <p>Per entry: block offset (long), on-disk data size (int), then the
 * block key as a vint-length-prefixed byte array.
 *
 * @param out the data output stream to write the block index to. Typically
 *     a stream writing into an {@link HFile} block.
 * @throws IOException if writing an index entry fails
 */
void writeRoot(DataOutput out) throws IOException {
  final int entryCount = blockKeys.size();
  for (int i = 0; i < entryCount; ++i) {
    out.writeLong(blockOffsets.get(i));
    out.writeInt(onDiskDataSizes.get(i));
    Bytes.writeByteArray(out, blockKeys.get(i));
  }
}
/**
 * Write byte-array with a WritableUtils.vint length prefix. A null array is
 * encoded as a bare vint of 0, indistinguishable from an empty array.
 *
 * @param out output stream to be written to
 * @param b array to write; may be null
 * @throws IOException if the write fails
 */
public static void writeByteArray(final DataOutput out, final byte[] b) throws IOException {
  if (b != null) {
    writeByteArray(out, b, 0, b.length);
  } else {
    WritableUtils.writeVInt(out, 0);
  }
}
/**
 * Write the number of regions moved in the first line followed by regions moved in subsequent
 * lines.
 *
 * @param filename path of the file to (over)write
 * @param movedRegions regions to record, each serialized as a length-prefixed byte array
 * @throws IOException if the file cannot be written; the failure is logged before rethrowing
 */
private void writeFile(String filename, List<RegionInfo> movedRegions) throws IOException {
  try (DataOutputStream dos =
      new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) {
    // Count first so the reader knows how many entries follow.
    dos.writeInt(movedRegions.size());
    for (RegionInfo region : movedRegions) {
      Bytes.writeByteArray(dos, RegionInfo.toByteArray(region));
    }
  } catch (IOException e) {
    LOG.error("ERROR: Was Not able to write regions moved to output file but moved "
        + movedRegions.size() + " regions", e);
    throw e;
  }
}
/** * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, * although the two metadata formats do not have to be consistent. This * does have to be consistent with how {@link * CompoundBloomFilter#CompoundBloomFilter(DataInput, * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields. */ @Override public void write(DataOutput out) throws IOException { out.writeInt(VERSION); out.writeLong(getByteSize()); out.writeInt(prevChunk.getHashCount()); out.writeInt(prevChunk.getHashType()); out.writeLong(getKeyCount()); out.writeLong(getMaxKeys()); // Fields that don't have equivalents in ByteBloomFilter. out.writeInt(numChunks); if (comparator != null) { Bytes.writeByteArray(out, Bytes.toBytes(comparator.getClass().getName())); } else { // Internally writes a 0 vint if the byte[] is null Bytes.writeByteArray(out, null); } // Write a single-level index without compression or block header. bloomBlockIndexWriter.writeSingleLevelIndex(out, "Bloom filter"); } }
@Override
public void write(DataOutput out) throws IOException {
  // Build the protobuf representation of this snapshot region split.
  TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
      .setTable(ProtobufUtil.toTableSchema(htd))
      .setRegion(HRegionInfo.convert(regionInfo));
  for (String location : locations) {
    builder.addLocations(location);
  }
  TableSnapshotRegionSplit split = builder.build();

  // Serialize the proto with an explicit int length prefix.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  split.writeTo(baos);
  baos.close();
  byte[] buf = baos.toByteArray();
  out.writeInt(buf.length);
  out.write(buf);

  // Scan and restore directory follow as vint-length-prefixed byte arrays.
  byte[] scanBytes = Bytes.toBytes(scan);
  Bytes.writeByteArray(out, scanBytes);
  byte[] restoreDirBytes = Bytes.toBytes(restoreDir);
  Bytes.writeByteArray(out, restoreDirBytes);
}
@Override
public void write(DataOutput output) throws IOException {
  // Parent state first, then column family and qualifier as
  // length-prefixed byte arrays.
  super.write(output);
  Bytes.writeByteArray(output, cf);
  Bytes.writeByteArray(output, cq);
}
@Override
public void write(DataOutput output) throws IOException {
  super.write(output);
  // Column family, then qualifier, each as a length-prefixed byte array.
  Bytes.writeByteArray(output, cf);
  Bytes.writeByteArray(output, cq);
}
@Override
public void write(DataOutput out) throws IOException {
  // Parent fields first, then the user as a length-prefixed byte array.
  super.write(out);
  Bytes.writeByteArray(out, user);
}
}
@Override public void write(DataOutput out) throws IOException { super.write(out); // Explicitly writing null to maintain se/deserialize backward compatibility. Bytes.writeByteArray(out, (table == null) ? null : table.getName()); out.writeBoolean(family != null); if (family != null) { Bytes.writeByteArray(out, family); } out.writeBoolean(qualifier != null); if (qualifier != null) { Bytes.writeByteArray(out, qualifier); } out.writeBoolean(namespace != null); if(namespace != null) { Bytes.writeByteArray(out, Bytes.toBytes(namespace)); } } }
@Override
public void write(DataOutput output) throws IOException {
  // Parent fields, then the nested schema, the ordinal position, and the
  // name as a length-prefixed byte array.
  super.write(output);
  schema.write(output);
  output.writeInt(position);
  Bytes.writeByteArray(output, name);
}
@Override public void write(DataOutput output) throws IOException { super.write(output); try { output.writeBoolean(allCFs); if (!allCFs) { Bytes.writeByteArray(output, essentialCF); } } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry ServerUtil.throwIOException("MultiKeyValueComparisonFilter failed during writing", t); } }
/**
 * Internal write the underlying data for the entry - this does not do any special prefixing.
 * Writing should be done via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure
 * consistent reading/writing of {@link IndexedKeyValue}s.
 *
 * <p>Emits the index table name (length-prefixed), the mutation's concrete class name
 * (modified-UTF), and then the mutation's own serialized form.
 *
 * @param out to write data to. Does not close or flush the passed object.
 * @throws IOException if there is a problem writing the underlying data
 */
void writeData(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.indexTableName.get());
  // Class name lets the reader reconstruct the right Mutation subtype.
  out.writeUTF(this.mutation.getClass().getName());
  this.mutation.write(out);
}
/**
 * Internal write the underlying data for the entry - this does not do any special prefixing.
 * Writing should be done via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure
 * consistent reading/writing of {@link IndexedKeyValue}s.
 *
 * <p>Emits the index table name, then the mutation converted to its protobuf form, both as
 * length-prefixed byte arrays.
 *
 * @param out to write data to. Does not close or flush the passed object.
 * @throws IOException if there is a problem writing the underlying data
 */
void writeData(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.indexTableName.get());
  MutationProto proto = toMutationProto(this.mutation);
  byte[] protoBytes = proto.toByteArray();
  Bytes.writeByteArray(out, protoBytes);
}
/**
 * Serializes the view constants (count, then each as a length-prefixed byte array) and
 * stashes the result on the scan under the VIEW_CONSTANTS attribute.
 *
 * @param viewConstants constants to serialize; must not be null
 * @param scan scan to receive the serialized attribute
 */
private void serializeViewConstantsIntoScan(byte[][] viewConstants, Scan scan) {
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  try {
    DataOutputStream out = new DataOutputStream(buffer);
    WritableUtils.writeVInt(out, viewConstants.length);
    for (byte[] constant : viewConstants) {
      Bytes.writeByteArray(out, constant);
    }
    scan.setAttribute(BaseScannerRegionObserver.VIEW_CONSTANTS, buffer.toByteArray());
  } catch (IOException e) {
    // In-memory streams shouldn't fail; surface anything unexpected.
    throw new RuntimeException(e);
  } finally {
    try {
      buffer.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
@Override
public void write(DataOutput output) throws IOException {
  // Encode as: entry count, then per entry the region name (string) and its
  // guide posts (count + length-prefixed byte arrays). Null map -> count 0.
  if (regionGuidePosts == null) {
    WritableUtils.writeVInt(output, 0);
    return;
  }
  WritableUtils.writeVInt(output, regionGuidePosts.size());
  for (Entry<String, byte[][]> entry : regionGuidePosts.entrySet()) {
    WritableUtils.writeString(output, entry.getKey());
    byte[][] guidePosts = entry.getValue();
    WritableUtils.writeVInt(output, guidePosts.length);
    for (byte[] guidePost : guidePosts) {
      Bytes.writeByteArray(output, guidePost);
    }
  }
}
}
@Override
public void write(DataOutput output) throws IOException {
  Bytes.writeByteArray(output, name.getBytes());
  // A missing family is encoded as an empty array rather than null.
  byte[] familyBytes = (familyName == null) ? ByteUtil.EMPTY_BYTE_ARRAY : familyName.getBytes();
  Bytes.writeByteArray(output, familyBytes);
  WritableUtils.writeVInt(output, dataType.ordinal());
  // Sentinels stand in for absent max length / scale.
  WritableUtils.writeVInt(output, (maxLength == null) ? NO_MAXLENGTH : maxLength);
  WritableUtils.writeVInt(output, (scale == null) ? NO_SCALE : scale);
  output.writeBoolean(nullable);
  WritableUtils.writeVInt(output, position);
  WritableUtils.writeVInt(output, ColumnModifier.toSystemValue(columnModifier));
}