/**
 * Records the given item count in this BTreePage's Header. No resizing of any kind is performed.
 *
 * @param itemCount The count to set.
 */
private void setCount(int itemCount) {
    // Keep the cached field and the serialized header in sync.
    this.count = itemCount;
    BitConverter.writeInt(this.header, COUNT_OFFSET, itemCount);
}
/**
 * Updates the Header to contain the given id.
 *
 * @param id The id to write at ID_OFFSET within the header.
 */
private void setHeaderId(int id) {
    BitConverter.writeInt(this.header, ID_OFFSET, id);
}
/**
 * Updates the Footer to contain the given id.
 *
 * @param id The id to write at the beginning (offset 0) of the footer.
 */
private void setFooterId(int id) {
    BitConverter.writeInt(this.footer, 0, id);
}
@Override
public void length(int length) throws IOException {
    if (requiresExplicitLength()) {
        // Write the 4-byte length directly to the underlying (real) stream, then make that stream
        // the active output so subsequent writes go straight to it.
        // NOTE(review): 'super.out' reassignment presumably bypasses a length-buffering wrapper once
        // the length is known — confirm against the enclosing class's write path.
        BitConverter.writeInt(this.realStream, length);
        super.out = this.realStream;
        this.length = length;
    }
}
/**
 * Serializes this instance into a new byte array.
 * Layout: [msb: 4 bytes][lsb: 8 bytes].
 *
 * @return A newly allocated array containing the serialization.
 */
public byte[] toBytes() {
    byte[] result = new byte[Integer.BYTES + Long.BYTES];
    BitConverter.writeInt(result, 0, msb);
    BitConverter.writeLong(result, Integer.BYTES, lsb);
    return result;
}
/**
 * Serializes this event into a new byte array of the given total length.
 *
 * Format: [Header][Length][Contents]
 * * [Header]: a sequence of bytes identifying the start of an append:
 * * * [Prefix]: the PREFIX marker.
 * * * [OwnerId]: the owning Stream/Segment id.
 * * * [RoutingKey]: the event's routing key.
 * * * [Sequence]: the event's sequence number.
 * * * [StartTime]: the event's start time.
 * * [Length]: length of [Contents] (i.e., length - HEADER_LENGTH).
 * * [Contents]: produced by writeContent — presumably deterministic from the header fields; confirm.
 *
 * @param length Total serialization length (header + contents); must be at least HEADER_LENGTH.
 * @return The serialized payload.
 */
private byte[] serialize(int length) {
    Preconditions.checkArgument(length >= HEADER_LENGTH, "length is insufficient to accommodate header.");
    byte[] payload = new byte[length];
    // Header: PREFIX + ownerId + routingKey + sequence + start time + content length.
    int offset = 0;
    offset += BitConverter.writeInt(payload, offset, PREFIX);
    offset += BitConverter.writeInt(payload, offset, this.ownerId);
    offset += BitConverter.writeInt(payload, offset, this.routingKey);
    offset += BitConverter.writeInt(payload, offset, this.sequence);
    offset += BitConverter.writeLong(payload, offset, this.startTime);
    int contentLength = length - HEADER_LENGTH;
    offset += BitConverter.writeInt(payload, offset, contentLength);
    assert offset == HEADER_LENGTH : "Event header has a different length than expected";
    // Content.
    writeContent(payload, offset);
    return payload;
}
/**
 * Creates a new instance of the RandomRevisionDataOutput class. Upon a successful call to this constructor, 4 bytes
 * will have been written to the OutputStream representing a placeholder for the length. These 4 bytes will be populated
 * upon closing this OutputStream.
 *
 * @param outputStream The OutputStream to wrap. Must be a RandomAccessOutputStream; the cast below
 *                     throws ClassCastException otherwise.
 * @throws IOException If an IO Exception occurred.
 */
RandomRevisionDataOutput(OutputStream outputStream) throws IOException {
    super(outputStream);
    // Remember where the placeholder begins, then write a 4-byte zero that close() will later
    // overwrite with the actual length.
    this.initialPosition = ((RandomAccessOutputStream) outputStream).size();
    BitConverter.writeInt(outputStream, 0);
}
/**
 * Persists the sealing epoch for the given segment as a 4-byte serialized int under the
 * segment-sealed-epoch znode path (created only if it does not already exist).
 */
@Override
CompletableFuture<Void> createSegmentSealedEpochRecordData(long segmentToSeal, int epoch) {
    byte[] serializedEpoch = new byte[Integer.BYTES];
    BitConverter.writeInt(serializedEpoch, 0, epoch);
    String znodePath = String.format(segmentSealedEpochPathFormat, segmentToSeal);
    return Futures.toVoid(store.createZNodeIfNotExist(znodePath, serializedEpoch));
}
/**
 * Records the sealing epoch for the given segment in the in-memory map as a 4-byte serialized int.
 * An existing record for the segment is left untouched (putIfAbsent).
 *
 * @param segment The segment whose sealing epoch to record.
 * @param epoch   The epoch to record.
 * @return A completed future.
 */
@Override
CompletableFuture<Void> createSegmentSealedEpochRecordData(long segment, int epoch) {
    // BUGFIX: removed 'Preconditions.checkNotNull(epoch)' — 'epoch' is a primitive int, so the check
    // only autoboxed it and could never fail; it was a no-op.
    byte[] array = new byte[Integer.BYTES];
    BitConverter.writeInt(array, 0, epoch);
    synchronized (lock) {
        segmentSealingEpochs.putIfAbsent(segment, new Data(array, new Version.IntVersion(0)));
    }
    return CompletableFuture.completedFuture(null);
}
/**
 * Writes the given 32-bit Integer to the given ArrayView at the given offset.
 *
 * @param target The ArrayView to write to.
 * @param offset The offset within the ArrayView to write at.
 * @param value  The value to write.
 * @return The number of bytes written.
 */
public static int writeInt(ArrayView target, int offset, int value) {
    // Delegate to the byte[] overload, translating the view-relative offset to an array offset.
    return writeInt(target.array(), target.arrayOffset() + offset, value);
}
@Override
public void close() throws IOException {
    // Calculate the number of bytes written, making sure to exclude the bytes for the length encoding.
    RandomAccessOutputStream ros = (RandomAccessOutputStream) this.out;
    int length = ros.size() - this.initialPosition - Integer.BYTES;
    // Back-patch the length into the 4-byte placeholder that was reserved at construction time.
    // NOTE(review): this does not close/flush the wrapped stream — confirm that is intentionally
    // left to the caller/owner of the underlying OutputStream.
    BitConverter.writeInt(ros.subStream(this.initialPosition, Integer.BYTES), length);
}
/**
 * Builds a serialized footer.
 * Layout: [rootPageOffset: 8 bytes][rootPageLength: 4 bytes].
 *
 * @param rootPageOffset The offset of the root page.
 * @param rootPageLength The length of the root page.
 * @return A ByteArraySegment wrapping the serialized footer.
 */
private ByteArraySegment getFooter(long rootPageOffset, int rootPageLength) {
    byte[] footer = new byte[FOOTER_LENGTH];
    BitConverter.writeLong(footer, 0, rootPageOffset);
    BitConverter.writeInt(footer, Long.BYTES, rootPageLength);
    return new ByteArraySegment(footer);
}
/**
 * Serializes a single 32-bit integer into a new 4-byte ByteArraySegment.
 *
 * @param value The value to serialize.
 * @return A ByteArraySegment wrapping the serialization.
 */
private ByteArraySegment serializeInt(int value) {
    byte[] buffer = new byte[Integer.BYTES];
    BitConverter.writeInt(buffer, 0, value);
    return new ByteArraySegment(buffer);
}
void serialize() { Preconditions.checkState(this.data != null && !this.data.isReadOnly(), "Cannot serialize a read-only EntryHeader."); // Write length. BitConverter.writeInt(this.data, 0, getEntryLength()); // Write flags. byte flags = isFirstRecordEntry() ? FIRST_ENTRY_MASK : 0; flags |= isLastRecordEntry() ? LAST_ENTRY_MASK : 0; this.data.set(FLAGS_OFFSET, flags); } }
/** * Serializes the given {@link TableKey} for removal into the given array. * * @param tableKey The {@link TableKey} to serialize. * @param target The byte array to serialize to. * @param targetOffset The first offset within the byte array to serialize at. * @return The first offset in the given byte array after the serialization. */ private int serializeRemoval(@NonNull TableKey tableKey, byte[] target, int targetOffset) { val key = tableKey.getKey(); Preconditions.checkArgument(key.getLength() <= MAX_KEY_LENGTH, "Key too large."); int serializationLength = getRemovalLength(tableKey); Preconditions.checkElementIndex(targetOffset + serializationLength - 1, target.length, "serialization does not fit in target buffer"); // Serialize Header. target[targetOffset] = CURRENT_SERIALIZATION_VERSION; targetOffset++; targetOffset += BitConverter.writeInt(target, targetOffset, key.getLength()); targetOffset += BitConverter.writeInt(target, targetOffset, NO_VALUE); // Key System.arraycopy(key.array(), key.arrayOffset(), target, targetOffset, key.getLength()); return targetOffset + key.getLength(); }
/**
 * Produces a hash for the given ArrayView using a small bucket space — presumably to force
 * frequent hash collisions for test purposes.
 *
 * @param arrayView The data to hash.
 * @return A HASH_SIZE_BYTES-long array whose first 4 bytes encode the bucket number.
 */
private static byte[] hashWithCollisions(ArrayView arrayView) {
    int bucket = HashHelper.seededWith(IndexReaderWriterTests.class.getName()).hashToBucket(arrayView, COLLISION_HASH_BUCKETS);
    byte[] hash = new byte[KeyHasher.HASH_SIZE_BYTES];
    BitConverter.writeInt(hash, 0, bucket);
    return hash;
}
}
void commit() { Preconditions.checkState(this.buffer != null && !this.buffer.isReadOnly(), "Cannot commit a read-only FrameHeader"); assert this.buffer.getLength() == SERIALIZATION_LENGTH; // We already checked the size of the target buffer (in the constructor); no need to do it here again. int bufferOffset = 0; this.buffer.set(bufferOffset, getVersion()); bufferOffset += Byte.BYTES; bufferOffset += BitConverter.writeInt(this.buffer, bufferOffset, getContentLength()); this.buffer.set(bufferOffset, encodeFlags()); } }
/**
 * Records the last active segment number for a stream that is being deleted, so that a future
 * stream with the same name can continue segment numbering past it. Creates the znode if absent;
 * otherwise updates it, enforcing that the recorded value never decreases.
 *
 * @param scope             The stream's scope.
 * @param stream            The stream name.
 * @param lastActiveSegment The last active segment number to record.
 * @param context           Operation context (unused here).
 * @param executor          Executor (unused here).
 * @return A future that completes when the record has been persisted.
 */
@Override
CompletableFuture<Void> recordLastStreamSegment(final String scope, final String stream, final int lastActiveSegment,
                                                OperationContext context, final Executor executor) {
    final String deletePath = String.format(DELETED_STREAMS_PATH, getScopedStreamName(scope, stream));
    byte[] maxSegmentNumberBytes = new byte[Integer.BYTES];
    BitConverter.writeInt(maxSegmentNumberBytes, 0, lastActiveSegment);
    return storeHelper.getData(deletePath)
            .exceptionally(e -> {
                // A missing znode simply means no stream with this name was deleted before.
                // NOTE(review): 'e' may arrive wrapped in a CompletionException — confirm storeHelper
                // delivers DataNotFoundException unwrapped here.
                if (e instanceof StoreException.DataNotFoundException) {
                    return null;
                } else {
                    throw new CompletionException(e);
                }
            })
            .thenCompose(data -> {
                log.debug("Recording last segment {} for stream {}/{} on deletion.", lastActiveSegment, scope, stream);
                if (data == null) {
                    return Futures.toVoid(storeHelper.createZNodeIfNotExist(deletePath, maxSegmentNumberBytes));
                } else {
                    final int oldLastActiveSegment = BitConverter.readInt(data.getData(), 0);
                    // BUGFIX: Guava's Preconditions.checkArgument uses %s-style templates, not the
                    // SLF4J-style {} placeholders used previously (those printed literally and the
                    // arguments were dropped from the message).
                    Preconditions.checkArgument(lastActiveSegment >= oldLastActiveSegment,
                            "Old last active segment (%s) for %s/%s is higher than current one %s.",
                            oldLastActiveSegment, scope, stream, lastActiveSegment);
                    return Futures.toVoid(storeHelper.setData(deletePath, new Data(maxSegmentNumberBytes, data.getVersion())));
                }
            });
}
/** * Serializes the given {@link TableEntry} to the given byte array. * * @param entry The {@link TableEntry} to serialize. * @param target The byte array to serialize to. * @param targetOffset The first offset within the byte array to serialize at. * @return The first offset in the given byte array after the serialization. */ private int serializeUpdate(@NonNull TableEntry entry, byte[] target, int targetOffset) { val key = entry.getKey().getKey(); val value = entry.getValue(); Preconditions.checkArgument(key.getLength() <= MAX_KEY_LENGTH, "Key too large."); int serializationLength = getUpdateLength(entry); Preconditions.checkArgument(serializationLength <= MAX_SERIALIZATION_LENGTH, "Key+Value serialization too large."); Preconditions.checkElementIndex(targetOffset + serializationLength - 1, target.length, "serialization does not fit in target buffer"); // Serialize Header. target[targetOffset] = CURRENT_SERIALIZATION_VERSION; targetOffset++; targetOffset += BitConverter.writeInt(target, targetOffset, key.getLength()); targetOffset += BitConverter.writeInt(target, targetOffset, value.getLength()); // Key System.arraycopy(key.array(), key.arrayOffset(), target, targetOffset, key.getLength()); targetOffset += key.getLength(); // Value. System.arraycopy(value.array(), value.arrayOffset(), target, targetOffset, value.getLength()); targetOffset += value.getLength(); return targetOffset; }
BitConverter.writeInt(entryData, 0, count + 1);