private byte[] serialize(byte[] value) {
    final int len = LZ4_COMPRESSOR.maxCompressedLength(value.length);
    final byte[] out = new byte[len];
    final int compressedSize = LZ4_COMPRESSOR.compress(value, 0, value.length, out, 0);
    // Prefix the compressed payload with the uncompressed length so the
    // decompressor knows exactly how large an output buffer to allocate.
    return ByteBuffer.allocate(compressedSize + Integer.BYTES)
            .putInt(value.length)
            .put(out, 0, compressedSize)
            .array();
}
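For symmetry, a minimal sketch of the matching deserialize, assuming a companion LZ4FastDecompressor constant (LZ4_DECOMPRESSOR, hypothetical) obtained from the same LZ4Factory; it reads back the 4-byte length prefix written above:

private byte[] deserialize(byte[] value) {
    // Hypothetical counterpart to serialize(): the first 4 bytes carry the
    // uncompressed length, so the output array can be sized exactly.
    final int uncompressedLength = ByteBuffer.wrap(value).getInt();
    final byte[] restored = new byte[uncompressedLength];
    // LZ4_DECOMPRESSOR is an assumed LZ4FastDecompressor from the same factory.
    LZ4_DECOMPRESSOR.decompress(value, Integer.BYTES, restored, 0, uncompressedLength);
    return restored;
}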
@Override
ByteBuffer allocateOutBuffer(int inputSize, Closer closer) {
    ByteBuffer outBuffer = ByteBuffer.allocateDirect(lz4High.maxCompressedLength(inputSize));
    closer.register(() -> ByteBufferUtils.free(outBuffer));
    return outBuffer;
}
@Override
public ByteBuf encode(ByteBuf source) {
    int uncompressedLength = source.readableBytes();
    int maxLength = compressor.maxCompressedLength(uncompressedLength);
    ByteBuffer sourceNio = source.nioBuffer(source.readerIndex(), source.readableBytes());
    ByteBuf target = PooledByteBufAllocator.DEFAULT.buffer(maxLength, maxLength);
    ByteBuffer targetNio = target.nioBuffer(0, maxLength);
    int compressedLength = compressor.compress(sourceNio, 0, uncompressedLength, targetNio, 0, maxLength);
    target.writerIndex(compressedLength);
    return target;
}
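Note that this encoder writes no length prefix, so any decoder needs an external bound on the uncompressed size. A hedged sketch of such a decode, assuming a decompressor field of type LZ4SafeDecompressor (not shown in the source):

public ByteBuf decode(ByteBuf source, int maxUncompressedLength) {
    int compressedLength = source.readableBytes();
    ByteBuffer sourceNio = source.nioBuffer(source.readerIndex(), compressedLength);
    ByteBuf target = PooledByteBufAllocator.DEFAULT.buffer(maxUncompressedLength, maxUncompressedLength);
    ByteBuffer targetNio = target.nioBuffer(0, maxUncompressedLength);
    // The safe decompressor returns the actual number of bytes produced.
    int uncompressedLength = decompressor.decompress(
            sourceNio, 0, compressedLength, targetNio, 0, maxUncompressedLength);
    target.writerIndex(uncompressedLength);
    return target;
}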
@Override
protected byte[] compress(byte[] in) {
    if (in == null) {
        throw new NullPointerException("Can't compress null");
    }
    LZ4Compressor compressor = lz4Factory.fastCompressor();
    byte[] out = new byte[compressor.maxCompressedLength(in.length)];
    int compressedLength = compressor.compress(in, 0, in.length, out, 0);
    getLogger().debug("Compressed %d bytes to %d", in.length, compressedLength);
    return ByteBuffer.allocate(Integer.BYTES + compressedLength)
            .putInt(in.length)
            .put(out, 0, compressedLength)
            .array();
}
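A matching decompress would invert this framing. A sketch under the assumption that the same lz4Factory field is available (the method itself is not in the source):

protected byte[] decompress(byte[] in) {
    final int uncompressedLength = ByteBuffer.wrap(in).getInt();
    byte[] out = new byte[uncompressedLength];
    // Skip the 4-byte length prefix and inflate the remainder in one call.
    lz4Factory.fastDecompressor().decompress(in, Integer.BYTES, out, 0, uncompressedLength);
    return out;
}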
private ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect,
                               boolean allowEmptyReturn) {
    int targetBufSize = 0;
    int remaining = msg.readableBytes() + buffer.readableBytes();

    // quick overflow check
    if (remaining < 0) {
        throw new EncoderException("too much data to allocate a buffer for compression");
    }

    while (remaining > 0) {
        int curSize = Math.min(blockSize, remaining);
        remaining -= curSize;
        // add the worst-case compressed size of the current block (including header) to the total
        targetBufSize += compressor.maxCompressedLength(curSize) + HEADER_LENGTH;
    }

    // Beyond the raw byte count, the per-block headers (HEADER_LENGTH, one per
    // #blockSize chunk) also contribute to targetBufSize; since that sum cannot
    // wrap around back to a value >= 0, this is a sound overflow check.
    if (targetBufSize > maxEncodeSize || 0 > targetBufSize) {
        throw new EncoderException(String.format("requested encode buffer size (%d bytes) exceeds the maximum "
                + "allowable size (%d bytes)", targetBufSize, maxEncodeSize));
    }

    if (allowEmptyReturn && targetBufSize < blockSize) {
        return Unpooled.EMPTY_BUFFER;
    }

    if (preferDirect) {
        return ctx.alloc().ioBuffer(targetBufSize, targetBufSize);
    } else {
        return ctx.alloc().heapBuffer(targetBufSize, targetBufSize);
    }
}
@Override
public ByteBuf encode(Object in) throws IOException {
    ByteBuf bytes = null;
    try {
        LZ4Compressor compressor = factory.fastCompressor();
        bytes = innerCodec.getValueEncoder().encode(in);
        ByteBuffer srcBuf = bytes.internalNioBuffer(bytes.readerIndex(), bytes.readableBytes());
        int outMaxLength = compressor.maxCompressedLength(bytes.readableBytes());
        ByteBuf out = ByteBufAllocator.DEFAULT.buffer(outMaxLength + DECOMPRESSION_HEADER_SIZE);
        // Record the uncompressed length so the decoder can size its buffer exactly.
        out.writeInt(bytes.readableBytes());
        ByteBuffer outBuf = out.internalNioBuffer(out.writerIndex(), out.writableBytes());
        int pos = outBuf.position();
        compressor.compress(srcBuf, outBuf);
        int compressedLength = outBuf.position() - pos;
        out.writerIndex(out.writerIndex() + compressedLength);
        return out;
    } finally {
        if (bytes != null) {
            bytes.release();
        }
    }
}
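The 4-byte prefix makes decoding allocation-exact. A hedged sketch of the inverse step, reusing the factory field from above (the surrounding decoder plumbing and the method name are assumptions, not the library's actual decoder):

public ByteBuf decodeToBuffer(ByteBuf buf) {
    int uncompressedLength = buf.readInt();
    ByteBuf out = ByteBufAllocator.DEFAULT.buffer(uncompressedLength, uncompressedLength);
    ByteBuffer srcBuf = buf.internalNioBuffer(buf.readerIndex(), buf.readableBytes());
    ByteBuffer outBuf = out.internalNioBuffer(0, uncompressedLength);
    // The fast decompressor needs only the exact uncompressed length.
    factory.fastDecompressor().decompress(srcBuf, srcBuf.position(), outBuf, outBuf.position(), uncompressedLength);
    out.writerIndex(uncompressedLength);
    return out;
}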
private ChannelFuture finishEncode(final ChannelHandlerContext ctx, ChannelPromise promise) {
    if (finished) {
        promise.setSuccess();
        return promise;
    }
    finished = true;

    final ByteBuf footer = ctx.alloc().heapBuffer(
            compressor.maxCompressedLength(buffer.readableBytes()) + HEADER_LENGTH);
    flushBufferedData(footer);

    // Write an empty non-compressed block header as the end-of-stream marker.
    final int idx = footer.writerIndex();
    footer.setLong(idx, MAGIC_NUMBER);
    footer.setByte(idx + TOKEN_OFFSET, (byte) (BLOCK_TYPE_NON_COMPRESSED | compressionLevel));
    footer.setInt(idx + COMPRESSED_LENGTH_OFFSET, 0);
    footer.setInt(idx + DECOMPRESSED_LENGTH_OFFSET, 0);
    footer.setInt(idx + CHECKSUM_OFFSET, 0);

    footer.writerIndex(idx + HEADER_LENGTH);
    return ctx.writeAndFlush(footer, promise);
}
final int check = (int) checksum.getValue();
final int bufSize = compressor.maxCompressedLength(flushableBytes) + HEADER_LENGTH;
out.ensureWritable(bufSize);
final int idx = out.writerIndex();
/**
 * Creates a new {@link OutputStream} that compresses data using the LZ4 algorithm.
 *
 * @param out the stream to write compressed data to
 * @param blockSize default: 4; the block size used during compression. 4=64kb, 5=256kb,
 *                  6=1mb, 7=4mb. All other values will generate an exception
 * @param blockChecksum default: false; when true, an XXHash32 checksum is computed and
 *                      appended to the stream for every block of data
 * @param useBrokenFlagDescriptorChecksum default: false; when true, writes an incorrect
 *                                        FrameDescriptor checksum compatible with older Kafka clients
 * @throws IOException if the frame header cannot be written
 */
public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum,
                                 boolean useBrokenFlagDescriptorChecksum) throws IOException {
    this.out = out;
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    bd = new BD(blockSize);
    flg = new FLG(blockChecksum);
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    finished = false;
    writeHeader();
}
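A usage sketch (the file name and payload are hypothetical); closing the stream should finish the frame by flushing the final block:

byte[] payload = "example payload".getBytes(java.nio.charset.StandardCharsets.UTF_8);
try (OutputStream lz4Out = new KafkaLZ4BlockOutputStream(
        new FileOutputStream("payload.lz4"), 4, false, false)) {
    lz4Out.write(payload); // blockSize 4 selects the 64kb block size
}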
private ByteBuf compressDirect(ByteBuf input) throws IOException {
    int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes());
    // If the input is direct we allocate a direct output buffer as well; this lets us
    // use LZ4Compressor.compress on the NIO buffers and so eliminate memory copies.
    ByteBuf output = input.alloc().directBuffer(INTEGER_BYTES + maxCompressedLength);
    try {
        ByteBuffer in = inputNioBuffer(input);
        // Advance the reader index; the input is fully consumed.
        input.readerIndex(input.writerIndex());

        output.writeInt(in.remaining());

        ByteBuffer out = outputNioBuffer(output);
        int written = compressor.compress(
                in, in.position(), in.remaining(), out, out.position(), out.remaining());
        // Advance the writer index so the written bytes are reflected.
        output.writerIndex(output.writerIndex() + written);
    } catch (Exception e) {
        // Release the output buffer so we don't leak, and rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}
private ByteBuf compressHeap(ByteBuf input) throws IOException {
    int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes());

    // Not a direct buffer, so work with the backing byte arrays.
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Advance the reader index; the input is fully consumed.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator: with a PooledByteBufAllocator
    // this eliminates the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(INTEGER_BYTES + maxCompressedLength);
    try {
        output.writeInt(len);
        // Calculate the correct offset into the backing array.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = compressor.compress(in, inOffset, len, out, offset);
        // Advance the writer index so the written bytes are reflected.
        output.writerIndex(output.writerIndex() + written);
    } catch (Exception e) {
        // Release the output buffer so we don't leak, and rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}
public LZ4CompressingInputStream(InputStream delegate, int blockSize) throws IOException {
    super(delegate, LZ4_HEADER_SIZE + COMPRESSOR.maxCompressedLength(blockSize));
    this.blockSize = blockSize;
    this.uncompressedBuffer = new byte[blockSize];
    Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum();
    OutputStream delegateOutputStream = new InternalByteArrayOutputStream();
    this.compressingStream = new LZ4BlockOutputStream(delegateOutputStream, blockSize, COMPRESSOR, checksum, true);
    this.finished = false;
}
public ClickHouseLZ4OutputStream(OutputStream stream, int maxCompressBlockSize) {
    dataWrapper = new LittleEndianDataOutputStream(stream);
    compressor = factory.fastCompressor();
    // Pre-allocate one block of scratch space plus the worst-case compressed
    // output for it, so no per-block allocation is needed.
    currentBlock = new byte[maxCompressBlockSize];
    compressedBlock = new byte[compressor.maxCompressedLength(maxCompressBlockSize)];
}
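For context, a hedged sketch of how these two buffers would typically be used when a block is flushed. The real ClickHouse wire format also writes a per-block checksum and magic byte, which this sketch omits; writeBlock and the framing shown are illustrative, not the actual implementation:

private void writeBlock(int pendingBytes) throws IOException {
    // Compress the buffered bytes into the pre-allocated worst-case buffer...
    int compressed = compressor.compress(currentBlock, 0, pendingBytes, compressedBlock, 0);
    // ...then emit illustrative framing: compressed size, uncompressed size, data.
    dataWrapper.writeInt(compressed);
    dataWrapper.writeInt(pendingBytes);
    dataWrapper.write(compressedBlock, 0, compressed);
}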