/**
 * Extracts the raw bytes of a ByteBuffer.
 *
 * <p>The buffer is rewound before reading, so the full content up to the
 * buffer's limit is returned regardless of its current position. When the
 * buffer is array-backed and the backing array exactly matches the payload,
 * that array is returned directly without copying (note: callers then share
 * storage with the buffer). Otherwise a copy is made and the buffer is
 * rewound again so its position is left at zero.
 *
 * @param topic topic associated with the data (unused here)
 * @param data  buffer to serialize; may be null
 * @return the buffer's bytes, or null when {@code data} is null
 */
public byte[] serialize(String topic, ByteBuffer data) {
    if (data == null) {
        return null;
    }
    data.rewind();
    if (data.hasArray()) {
        final byte[] backing = data.array();
        // Zero-copy fast path: the backing array is exactly the payload.
        if (data.arrayOffset() == 0 && backing.length == data.remaining()) {
            return backing;
        }
    }
    final byte[] copy = new byte[data.remaining()];
    data.get(copy, 0, copy.length);
    data.rewind();
    return copy;
}
/**
 * Materializes the collector stored at {@code position} in the shared
 * aggregation buffer.
 *
 * <p>The dense-storage region is copied into a freshly allocated private
 * buffer so the returned collector never aliases {@code buf}.
 *
 * @param buf      shared aggregation buffer
 * @param position byte offset of this aggregator's slot
 * @return a collector backed by a private copy of the slot's bytes
 */
@Override
public Object get(ByteBuffer buf, int position) {
    final int denseSize = HyperLogLogCollector.getLatestNumBytesForDenseStorage();
    final ByteBuffer copy = ByteBuffer.allocate(denseSize);
    // Use a duplicate so the shared buffer's position/limit are untouched.
    final ByteBuffer window = buf.duplicate();
    window.position(position);
    window.limit(position + denseSize);
    copy.put(window);
    copy.rewind();
    return HyperLogLogCollector.makeCollector(copy);
}
/**
 * Serializes this collector into a read-only ByteBuffer.
 *
 * <p>When the collector is currently dense but has fewer than
 * {@code DENSE_THRESHOLD} non-zero registers, it is re-encoded sparsely as a
 * header followed by (short index, byte value) pairs — 3 bytes per non-zero
 * register. Otherwise a read-only view of the current storage buffer is
 * returned unchanged.
 */
public ByteBuffer toByteBuffer() {
    final short numNonZeroRegisters = getNumNonZeroRegisters();

    // store sparsely
    if (storageBuffer.remaining() == getNumBytesForDenseStorage() && numNonZeroRegisters < DENSE_THRESHOLD) {
        // Exact sizing: 3 bytes per non-zero register plus the fixed header.
        final ByteBuffer retVal = ByteBuffer.wrap(new byte[numNonZeroRegisters * 3 + getNumHeaderBytes()]);
        setVersion(retVal);
        setRegisterOffset(retVal, getRegisterOffset());
        setNumNonZeroRegisters(retVal, numNonZeroRegisters);
        setMaxOverflowValue(retVal, getMaxOverflowValue());
        setMaxOverflowRegister(retVal, getMaxOverflowRegister());

        final int startPosition = getPayloadBytePosition();
        retVal.position(getPayloadBytePosition(retVal));

        // Copy the dense payload out through a read-only view so this
        // collector's own buffer position is left untouched.
        final byte[] zipperBuffer = new byte[NUM_BYTES_FOR_BUCKETS];
        ByteBuffer roStorageBuffer = storageBuffer.asReadOnlyBuffer();
        roStorageBuffer.position(startPosition);
        roStorageBuffer.get(zipperBuffer);

        for (int i = 0; i < NUM_BYTES_FOR_BUCKETS; ++i) {
            if (zipperBuffer[i] != 0) {
                // Register index is stored relative to the payload start,
                // truncated to an unsigned 16-bit value.
                final short val = (short) (0xffff & (i + startPosition - initPosition));
                retVal.putShort(val);
                retVal.put(zipperBuffer[i]);
            }
        }

        retVal.rewind();
        return retVal.asReadOnlyBuffer();
    }

    return storageBuffer.asReadOnlyBuffer();
}
/**
 * Writes this sample entry to the channel: the box header, then the common
 * 8-byte sample-entry prefix (6 reserved zero bytes followed by the 16-bit
 * data_reference_index), then the raw payload.
 *
 * @param writableByteChannel destination channel
 * @throws IOException if a channel write fails
 */
@Override
public void getBox(WritableByteChannel writableByteChannel) throws IOException {
    writableByteChannel.write(getHeader());
    final ByteBuffer prefix = ByteBuffer.allocate(8);
    // Bytes 0-5 remain zero (reserved); skip straight to the index field.
    prefix.position(6);
    IsoTypeWriter.writeUInt16(prefix, dataReferenceIndex);
    prefix.rewind();
    writableByteChannel.write(prefix);
    writableByteChannel.write(ByteBuffer.wrap(data));
}
/**
 * Rewrites this collector's sparse storage into the dense representation:
 * a full header followed by one byte per register bucket.
 */
private void convertToDenseStorage() {
    ByteBuffer tmpBuffer = ByteBuffer.allocate(getNumBytesForDenseStorage());

    // put header
    setVersion(tmpBuffer);
    setRegisterOffset(tmpBuffer, getRegisterOffset());
    setNumNonZeroRegisters(tmpBuffer, getNumNonZeroRegisters());
    setMaxOverflowValue(tmpBuffer, getMaxOverflowValue());
    setMaxOverflowRegister(tmpBuffer, getMaxOverflowRegister());

    storageBuffer.position(getPayloadBytePosition());
    tmpBuffer.position(getPayloadBytePosition(tmpBuffer));

    // put payload
    // Each sparse entry is a (short bucketIndex, byte registerValue) pair;
    // scatter each value into its absolute index in the dense buffer.
    while (storageBuffer.hasRemaining()) {
        tmpBuffer.put(storageBuffer.getShort(), storageBuffer.get());
    }

    tmpBuffer.rewind();
    // Dense buffer replaces the sparse one; its payload starts at offset 0.
    storageBuffer = tmpBuffer;
    initPosition = 0;
}
/**
 * Loads {@code size} bytes from the file at the current {@code position}
 * and materializes them as an in-memory record batch.
 *
 * @param size        number of bytes to read
 * @param description context string used in the read-failure message
 * @return the loaded batch
 * @throws KafkaException wrapping any IOException from the read
 */
private RecordBatch loadBatchWithSize(int size, String description) {
    final ByteBuffer batchBuffer = ByteBuffer.allocate(size);
    try {
        Utils.readFullyOrFail(fileRecords.channel(), batchBuffer, position, description);
        batchBuffer.rewind();
        return toMemoryRecordBatch(batchBuffer);
    } catch (IOException e) {
        throw new KafkaException("Failed to load record batch at position " + position + " from " + fileRecords, e);
    }
}
@Test public void testReadNegativeStringSize() { byte[] stringBytes = "foo".getBytes(); ByteBuffer invalidBuffer = ByteBuffer.allocate(2 + stringBytes.length); invalidBuffer.putShort((short) -1); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); try { Type.STRING.read(invalidBuffer); fail("String size not validated"); } catch (SchemaException e) { // Expected exception } }
/**
 * Read a size-delimited byte buffer starting at the given offset.
 *
 * <p>The size is stored as a signed 32-bit integer immediately preceding the
 * data; a negative stored size denotes a null value. The source buffer's
 * position and limit are never modified (all work happens on a duplicate).
 *
 * @param buffer Buffer containing the size and data
 * @param start Offset in the buffer to read from
 * @return A slice of the buffer containing only the delimited data
 *         (excluding the size), or null when the stored size is negative
 */
public static ByteBuffer sizeDelimited(ByteBuffer buffer, int start) {
    int size = buffer.getInt(start);
    if (size < 0) {
        return null;
    }
    ByteBuffer slice = buffer.duplicate();
    slice.position(start + 4);
    slice = slice.slice();
    // slice() already leaves position at 0, so setting the limit is all
    // that's needed — the original trailing rewind() was a no-op.
    slice.limit(size);
    return slice;
}
@Test
public void testReadableChannel() throws IOException {
    Resource resource = new FileSystemResource(getClass().getResource("Resource.class").getFile());
    // Acquire the channel first; if that throws there is nothing to close.
    ReadableByteChannel channel = resource.readableChannel();
    try {
        ByteBuffer buffer = ByteBuffer.allocate((int) resource.contentLength());
        channel.read(buffer);
        buffer.rewind();
        // The class file is non-empty, so some bytes must have been read.
        assertTrue(buffer.limit() > 0);
    } finally {
        channel.close();
    }
}
@Test
public void toArrayDirectByteBuffer() {
    // Utils.toArray must copy out of a direct (non-array-backed) buffer
    // while leaving the buffer's position unchanged.
    byte[] contents = {0, 1, 2, 3, 4};
    ByteBuffer direct = ByteBuffer.allocateDirect(5);
    direct.put(contents);
    direct.rewind();

    assertArrayEquals(contents, Utils.toArray(direct));
    assertEquals(0, direct.position());

    // Explicit offset/length variant must also be position-neutral.
    assertArrayEquals(new byte[] {1, 2}, Utils.toArray(direct, 1, 2));
    assertEquals(0, direct.position());

    // From a non-zero position, only the remaining bytes are returned.
    direct.position(2);
    assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(direct));
    assertEquals(2, direct.position());
}
/**
 * Reads the header of the next record batch from the underlying file channel
 * and returns a lazily-loaded batch view, or null when no complete batch
 * remains before {@code end}.
 *
 * @throws IOException if reading the header from the channel fails
 * @throws CorruptRecordException if the stored record size is smaller than
 *         the minimum V0 record overhead
 */
@Override
public FileChannelRecordBatch nextBatch() throws IOException {
    FileChannel channel = fileRecords.channel();
    // Not even a full pre-magic header left before the end marker.
    if (position >= end - HEADER_SIZE_UP_TO_MAGIC)
        return null;

    logHeaderBuffer.rewind();
    Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header");

    logHeaderBuffer.rewind();
    long offset = logHeaderBuffer.getLong(OFFSET_OFFSET);
    int size = logHeaderBuffer.getInt(SIZE_OFFSET);

    // V0 has the smallest overhead, stricter checking is done later
    if (size < LegacyRecord.RECORD_OVERHEAD_V0)
        throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " +
                "overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file()));

    // The batch body would run past the visible end of the log: no batch.
    if (position > end - LOG_OVERHEAD - size)
        return null;

    byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
    final FileChannelRecordBatch batch;

    // Magic values below v2 use the legacy message format.
    if (magic < RecordBatch.MAGIC_VALUE_V2)
        batch = new LegacyFileChannelRecordBatch(offset, magic, fileRecords, position, size);
    else
        batch = new DefaultFileChannelRecordBatch(offset, magic, fileRecords, position, size);

    // Advance past this batch for the next call.
    position += batch.sizeInBytes();
    return batch;
}
/**
 * Writes this box's body: version/flags, the 16-byte SystemID (UUID split
 * into two big-endian 64-bit halves), a 32-bit length prefix, and finally
 * the protection-specific payload bytes.
 *
 * @param byteBuffer destination buffer, assumed large enough for the content
 */
@Override
protected void getContent(ByteBuffer byteBuffer) {
    writeVersionAndFlags(byteBuffer);
    IsoTypeWriter.writeUInt64(byteBuffer, systemId.getMostSignificantBits());
    IsoTypeWriter.writeUInt64(byteBuffer, systemId.getLeastSignificantBits());
    final ByteBuffer payload = protectionSpecificHeader.getData();
    // Rewind so the full payload is written regardless of prior reads.
    payload.rewind();
    IsoTypeWriter.writeUInt32(byteBuffer, payload.limit());
    byteBuffer.put(payload);
}