public void testByteBuffer() throws IOException { byte[] string = "Hello".getBytes(); ByteBuffer buf = ByteBuffer.allocate(1024); buf.order(ByteOrder.LITTLE_ENDIAN); buf.putInt(123); buf.putChar('z'); buf.putShort((short) 2017); buf.putFloat(3.1415f); buf.put(string); buf.putLong(9876543210L); buf.putDouble(3.14159265); buf.put((byte) 5); buf.flip(); // important // TestUtils.showBuffer(buf); assertEquals(123, buf.getInt()); assertEquals('z', buf.getChar()); assertEquals(2017, buf.getShort()); assertEquals(3.1415f, buf.getFloat()); byte[] so = new byte[string.length]; buf.get(so); assertTrue(TestUtils.sameBytes(string, so)); assertEquals(9876543210L, buf.getLong()); assertEquals(3.14159265, buf.getDouble()); assertEquals((byte) 5, buf.get()); }
/**
 * Appends one fixed-size consumer-log unit for {@code message} into
 * {@code targetBuffer}, staging the bytes in the reusable {@code workingBuffer}.
 * Unit layout: timestamp(8) | message offset(8) | size(4) | header size(2),
 * padded to {@code CONSUMER_LOG_UNIT_BYTES}.
 *
 * @param baseOffset   base physical offset of {@code targetBuffer}
 * @param targetBuffer destination buffer the unit is copied into
 * @param freeSpace    free bytes remaining in the target (unused here)
 * @param message      the message metadata being indexed
 * @return a SUCCESS result carrying the wrote offset and unit size
 */
@Override
public AppendMessageResult<Void> doAppend(long baseOffset, ByteBuffer targetBuffer, int freeSpace, ConsumerLogMessage message) {
    // clear() already resets position=0 and limit=capacity, so the former
    // flip() call here (which briefly truncated the limit to 0 before
    // limit(...) restored it) was redundant and has been removed.
    workingBuffer.clear();
    workingBuffer.limit(CONSUMER_LOG_UNIT_BYTES);

    // Physical offset of this unit within the log.
    final long wroteOffset = baseOffset + targetBuffer.position();

    workingBuffer.putLong(System.currentTimeMillis());
    workingBuffer.putLong(message.getOffset());
    workingBuffer.putInt(message.getSize());
    workingBuffer.putShort(message.getHeaderSize());
    targetBuffer.put(workingBuffer.array(), 0, CONSUMER_LOG_UNIT_BYTES);

    return new AppendMessageResult<>(AppendMessageStatus.SUCCESS, wroteOffset, CONSUMER_LOG_UNIT_BYTES);
}
}
/**
 * Serializes a mutable {@link RTree} into the flat byte layout used by
 * {@link ImmutableRTree}: version byte, dimension count, then the root
 * node's recursive encoding.
 *
 * @param rTree the tree to snapshot
 * @return an immutable view over the serialized tree (empty tree yields the
 *         shared empty instance)
 */
public static ImmutableRTree newImmutableFromMutable(RTree rTree) {
    if (rTree.getSize() == 0) {
        return empty();
    }
    final ByteBuffer serialized = ByteBuffer.allocate(calcNumBytes(rTree));
    serialized.put(VERSION);
    serialized.putInt(rTree.getNumDims());
    rTree.getRoot().storeInByteBuffer(serialized, serialized.position());
    // Reset to the start so readers see the whole encoding.
    serialized.position(0);
    return new ImmutableRTree(serialized, rTree.getBitmapFactory());
}
/**
 * Adds metadata at current position (position is moved forward). Does not flip or reset.
 *
 * @param destination              buffer to append the metadata to
 * @param includeNextBlockMetadata whether to also write the on-disk size of the next block
 * @return The passed <code>destination</code> with metadata added.
 */
private ByteBuffer addMetaData(final ByteBuffer destination, boolean includeNextBlockMetadata) {
    // Single flag byte: 1 when HBase-level checksums are in use, 0 otherwise.
    final byte checksumFlag = this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0;
    destination.put(checksumFlag);
    destination.putLong(this.offset);
    if (includeNextBlockMetadata) {
        destination.putInt(this.nextBlockOnDiskSize);
    }
    return destination;
}
/**
 * Serializes a {@code HadoopFsRecoverable} into a little-endian byte array:
 * magic(4) | offset(8) | target-path length(4) | temp-path length(4) |
 * target-path bytes | temp-path bytes.
 *
 * @param obj the recoverable state to encode
 * @return the encoded bytes
 * @throws IOException declared by the serializer contract; not thrown here
 */
@Override
public byte[] serialize(HadoopFsRecoverable obj) throws IOException {
    final byte[] targetPath = obj.targetFile().toString().getBytes(CHARSET);
    final byte[] tempPath = obj.tempFile().toString().getBytes(CHARSET);
    // Fixed header is 20 bytes: magic(4) + offset(8) + two length fields(4 each).
    final byte[] encoded = new byte[20 + targetPath.length + tempPath.length];
    ByteBuffer.wrap(encoded)
            .order(ByteOrder.LITTLE_ENDIAN)
            .putInt(MAGIC_NUMBER)
            .putLong(obj.offset())
            .putInt(targetPath.length)
            .putInt(tempPath.length)
            .put(targetPath)
            .put(tempPath);
    return encoded;
}
/**
 * Verifies that {@code Checksums.updateInt} produces the same CRC as feeding
 * the int's big-endian byte encoding directly into the checksum.
 */
@Test
public void testUpdateInt() {
    final int value = 1000;
    // Reference encoding: the 4 big-endian bytes of the int.
    final ByteBuffer encoded = ByteBuffer.allocate(4);
    encoded.putInt(value);

    Checksum viaHelper = Crc32C.create();
    Checksum viaBytes = Crc32C.create();
    Checksums.updateInt(viaHelper, value);
    viaBytes.update(encoded.array(), encoded.arrayOffset(), 4);

    assertEquals("Crc values should be the same", viaHelper.getValue(), viaBytes.getValue());
}
/**
 * Encodes a socket address into {@code byteBuffer} as 4 raw IPv4 address
 * bytes followed by a 4-byte port, then flips the buffer so it is ready to
 * be read or written out.
 *
 * @param socketAddress the address to encode; must be an IPv4
 *                      {@link InetSocketAddress}
 * @param byteBuffer    destination buffer (needs at least 8 bytes free)
 * @return the same {@code byteBuffer}, flipped for reading
 * @throws IllegalArgumentException if the address is not IPv4
 */
public static ByteBuffer socketAddress2ByteBuffer(final SocketAddress socketAddress, final ByteBuffer byteBuffer) {
    InetSocketAddress inetSocketAddress = (InetSocketAddress) socketAddress;
    byte[] address = inetSocketAddress.getAddress().getAddress();
    // Previously the first 4 bytes were written unconditionally, which would
    // silently corrupt a 16-byte IPv6 address; fail fast instead of encoding
    // garbage into the fixed 4-byte wire format.
    if (address.length != 4) {
        throw new IllegalArgumentException("Only IPv4 addresses are supported");
    }
    byteBuffer.put(address, 0, 4);
    byteBuffer.putInt(inetSocketAddress.getPort());
    byteBuffer.flip();
    return byteBuffer;
}
/**
 * Serializes histogram fields that are common to both the full and sparse encoding modes.
 * Field order defines the wire layout: limits, bucket count, outlier mode,
 * then the counters and observed extrema.
 *
 * @param buf Destination buffer
 */
private void writeByteBufferCommonFields(ByteBuffer buf) {
    buf.putDouble(lowerLimit)
            .putDouble(upperLimit)
            .putInt(numBuckets)
            .put((byte) outlierHandlingMode.ordinal())
            .putLong(count)
            .putLong(lowerOutlierCount)
            .putLong(upperOutlierCount)
            .putLong(missingValueCount)
            .putDouble(max)
            .putDouble(min);
}
private MemoryRecords buildOverflowBatch(int remaining) { // We do not have any records left to down-convert. Construct an overflow message for the length remaining. // This message will be ignored by the consumer because its length will be past the length of maximum // possible response size. // DefaultRecordBatch => // BaseOffset => Int64 // Length => Int32 // ... ByteBuffer overflowMessageBatch = ByteBuffer.allocate( Math.max(MIN_OVERFLOW_MESSAGE_LENGTH, Math.min(remaining + 1, MAX_READ_SIZE))); overflowMessageBatch.putLong(-1L); // Fill in the length of the overflow batch. A valid batch must be at least as long as the minimum batch // overhead. overflowMessageBatch.putInt(Math.max(remaining + 1, DefaultRecordBatch.RECORD_BATCH_OVERHEAD)); log.debug("Constructed overflow message batch for partition {} with length={}", topicPartition(), remaining); return MemoryRecords.readableRecords(overflowMessageBatch); }
/**
 * Serializes this node (and, recursively, its subtree) into {@code buffer}
 * starting at {@code position}.
 *
 * Layout: header short | min coords | max coords | bitmap length + bytes |
 * child offset table (one int per child) | child subtrees.
 *
 * @param buffer   destination buffer; written via both relative and
 *                 absolute-index puts
 * @param position absolute offset at which this node's encoding begins
 * @return the first offset past this node's entire serialized subtree
 */
public int storeInByteBuffer(ByteBuffer buffer, int position) {
    buffer.position(position);
    // Header short: high bit = leaf flag, low 15 bits = child count.
    buffer.putShort((short) (((isLeaf ? 0x1 : 0x0) << 15) | getChildren().size()));
    // Bounding box: min coordinates then max coordinates, one float each.
    for (float v : getMinCoordinates()) {
        buffer.putFloat(v);
    }
    for (float v : getMaxCoordinates()) {
        buffer.putFloat(v);
    }
    // Length-prefixed bitmap payload.
    byte[] bytes = bitmap.toBytes();
    buffer.putInt(bytes.length);
    buffer.put(bytes);
    // pos walks the offset table slots; children are laid out immediately
    // after the table, so the first child starts at table-start + table-size.
    int pos = buffer.position();
    int childStartOffset = pos + getChildren().size() * Integer.BYTES;
    for (Node child : getChildren()) {
        // Absolute-index put: record where this child's encoding begins,
        // without disturbing the buffer's relative position.
        buffer.putInt(pos, childStartOffset);
        // Recursive call returns the offset just past the child's subtree,
        // which is where the next sibling begins.
        childStartOffset = child.storeInByteBuffer(buffer, childStartOffset);
        pos += Integer.BYTES;
    }
    return childStartOffset;
}