/**
 * Creates a new LZF decoder with the specified decoding instance.
 *
 * @param safeInstance
 *        If {@code true} decoder will use {@link ChunkDecoder} that only uses standard JDK access methods,
 *        and should work on all Java platforms and JVMs.
 *        Otherwise decoder will try to use a highly optimized {@link ChunkDecoder} implementation that uses
 *        Sun JDK's {@link sun.misc.Unsafe} class (which may be included by other JDKs as well).
 */
public LzfDecoder(boolean safeInstance) {
    decoder = safeInstance ?
            ChunkDecoderFactory.safeInstance() :
            ChunkDecoderFactory.optimalInstance();

    recycler = BufferRecycler.instance();
}
/**
 * Creates a new LZF encoder with the specified settings.
 *
 * @param safeInstance
 *        If {@code true} encoder will use {@link ChunkEncoder} that only uses standard JDK access methods,
 *        and should work on all Java platforms and JVMs.
 *        Otherwise encoder will try to use a highly optimized {@link ChunkEncoder} implementation that uses
 *        Sun JDK's {@link sun.misc.Unsafe} class (which may be included by other JDKs as well).
 * @param totalLength
 *        Expected total length of content to compress; only matters for outgoing messages that are smaller
 *        than the maximum chunk size (64k), to optimize encoding hash tables.
 */
public LzfEncoder(boolean safeInstance, int totalLength) {
    super(false);
    if (totalLength < MIN_BLOCK_TO_COMPRESS || totalLength > MAX_CHUNK_LEN) {
        throw new IllegalArgumentException("totalLength: " + totalLength +
                " (expected: " + MIN_BLOCK_TO_COMPRESS + '-' + MAX_CHUNK_LEN + ')');
    }

    encoder = safeInstance ?
            ChunkEncoderFactory.safeNonAllocatingInstance(totalLength) :
            ChunkEncoderFactory.optimalNonAllocatingInstance(totalLength);

    recycler = BufferRecycler.instance();
}
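/*
 * Usage sketch (not part of the original sources): installing the two handlers above in a
 * Netty 4.x pipeline. The initializer class, the handler names and the totalLength of 65535
 * are illustrative assumptions, not taken from the code above.
 */
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.compression.LzfDecoder;
import io.netty.handler.codec.compression.LzfEncoder;

public class LzfCodecInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ChannelPipeline p = ch.pipeline();
        // safeInstance = false: prefer the Unsafe-based chunk codec where the platform allows it.
        p.addLast("lzf-decoder", new LzfDecoder(false));
        // totalLength must fall within [MIN_BLOCK_TO_COMPRESS, MAX_CHUNK_LEN] per the argument
        // check above; 65535 (the 64k chunk limit) is just one valid value.
        p.addLast("lzf-encoder", new LzfEncoder(false, 65535));
    }
}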
    @Override
    public BufferRecycler get() {
        log.info("Allocating new bufferRecycler[%,d]", counter.incrementAndGet());
        return new BufferRecycler();
    }
}
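/*
 * Sketch only (assumed context): the get() above has the shape of a Supplier<BufferRecycler>.
 * One common way to use such a supplier is to back a per-thread cache, so a new BufferRecycler
 * is only allocated (and counted) the first time each thread asks for one. Class and field
 * names here are hypothetical; the BufferRecycler package is assumed from the compress-lzf library.
 */
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

import com.ning.compress.BufferRecycler;

public final class PerThreadRecyclers {
    private static final AtomicLong COUNTER = new AtomicLong();

    // Stand-in for the logging supplier above.
    private static final Supplier<BufferRecycler> ALLOCATOR = () -> {
        COUNTER.incrementAndGet();
        return new BufferRecycler();
    };

    private static final ThreadLocal<BufferRecycler> PER_THREAD =
            ThreadLocal.withInitial(ALLOCATOR);

    public static BufferRecycler recycler() {
        return PER_THREAD.get();
    }
}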
    @Override
    protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) throws Exception {
        final int length = in.readableBytes();
        final int idx = in.readerIndex();
        final byte[] input;
        final int inputPtr;
        if (in.hasArray()) {
            // Heap buffer: compress directly from the backing array.
            input = in.array();
            inputPtr = in.arrayOffset() + idx;
        } else {
            // Direct buffer: copy into a recycled temporary array first.
            input = recycler.allocInputBuffer(length);
            in.getBytes(idx, input, 0, length);
            inputPtr = 0;
        }

        // Reserve the worst-case output size before encoding into out's backing array.
        final int maxOutputLength = LZFEncoder.estimateMaxWorkspaceSize(length);
        out.ensureWritable(maxOutputLength);
        final byte[] output = out.array();
        final int outputPtr = out.arrayOffset() + out.writerIndex();
        final int outputLength = LZFEncoder.appendEncoded(encoder,
                input, inputPtr, length, output, outputPtr) - outputPtr;
        out.writerIndex(out.writerIndex() + outputLength);
        in.skipBytes(length);

        if (!in.hasArray()) {
            recycler.releaseInputBuffer(input);
        }
    }
}
    inPos = in.arrayOffset() + idx;
} else {
    inputArray = recycler.allocInputBuffer(chunkLength);
    in.getBytes(idx, inputArray, 0, chunkLength);
    inPos = 0;
}

decoder.decodeChunk(inputArray, inPos, outputArray, outPos, outPos + originalLength);
uncompressed.writerIndex(uncompressed.writerIndex() + originalLength);
out.add(uncompressed);
recycler.releaseInputBuffer(inputArray);
public LZFInputStream(final ChunkDecoder decoder, final InputStream in) throws IOException {
    this(decoder, in, BufferRecycler.instance(), false);
}
public LZFInputStream(final ChunkDecoder decoder, final InputStream in,
        final BufferRecycler bufferRecycler, boolean fullReads) throws IOException {
    super();
    _decoder = decoder;
    _recycler = bufferRecycler;
    _inputStream = in;
    _inputStreamClosed = false;
    _cfgFullReads = fullReads;

    _inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
    _decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
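/*
 * Usage sketch (not part of the original sources): feeding a file through the four-argument
 * LZFInputStream constructor shown above. The package paths and the file name are assumptions
 * based on the compress-lzf library.
 */
import java.io.FileInputStream;
import java.io.IOException;

import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.LZFInputStream;
import com.ning.compress.lzf.util.ChunkDecoderFactory;

public class LzfReadExample {
    public static void main(String[] args) throws IOException {
        try (LZFInputStream lzfIn = new LZFInputStream(
                ChunkDecoderFactory.optimalInstance(),   // or safeInstance()
                new FileInputStream("data.lzf"),
                BufferRecycler.instance(),
                false /* fullReads */)) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = lzfIn.read(buf)) != -1) {
                // consume n decompressed bytes
            }
        }
    }
}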
/**
 * A call to this method results in a call to {@link Uncompressor#complete()},
 * which is idempotent (i.e. it can be called multiple times without ill effects).
 */
@Override
public void close() throws IOException {
    _uncompressor.complete();
}
protected byte[] _getTmpBuffer() {
    if (_tmpBuffer == null) {
        _tmpBuffer = _bufferRecycler.allocDecodeBuffer(INPUT_BUFFER_SIZE);
    }
    return _tmpBuffer;
}
public LZFOutputStream(final ChunkEncoder encoder, final OutputStream outputStream,
        final int bufferSize, BufferRecycler bufferRecycler) {
    super(outputStream);
    _encoder = encoder;
    if (bufferRecycler == null) {
        bufferRecycler = _encoder._recycler;
    }
    _recycler = bufferRecycler;
    _outputBuffer = bufferRecycler.allocOutputBuffer(bufferSize);
    _outputStreamClosed = false;
}
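/*
 * Companion write-side sketch (not part of the original sources): the simple
 * LZFOutputStream(OutputStream) convenience constructor and its package path are assumptions
 * based on the compress-lzf library; the file name and payload are illustrative.
 */
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import com.ning.compress.lzf.LZFOutputStream;

public class LzfWriteExample {
    public static void main(String[] args) throws IOException {
        try (LZFOutputStream lzfOut = new LZFOutputStream(new FileOutputStream("data.lzf"))) {
            lzfOut.write("hello, lzf".getBytes(StandardCharsets.UTF_8));
        } // close() flushes the final chunk and closes the underlying stream
    }
}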
/**
 * Uses a ThreadLocal soft-referenced BufferRecycler instance.
 *
 * @param totalLength Total encoded length; used for calculating size
 *   of hash table to use
 */
protected ChunkEncoder(int totalLength) {
    this(totalLength, BufferRecycler.instance());
}
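/*
 * Sketch (not part of the original sources): obtaining a ChunkEncoder through the factory
 * rather than the protected constructor above, then compressing with the
 * LZFEncoder.estimateMaxWorkspaceSize/appendEncoded calls used elsewhere in this code.
 * ChunkEncoderFactory.optimalInstance(int) and the package paths are assumptions based on
 * the compress-lzf library.
 */
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

public final class ChunkEncoderExample {
    public static byte[] compress(byte[] input) {
        // totalLength sizes the encoder's hash table, as described in the Javadoc above.
        ChunkEncoder enc = ChunkEncoderFactory.optimalInstance(input.length);
        byte[] workspace = new byte[LZFEncoder.estimateMaxWorkspaceSize(input.length)];
        // appendEncoded returns the end offset of the encoded data in the workspace.
        int end = LZFEncoder.appendEncoded(enc, input, 0, input.length, workspace, 0);
        byte[] compressed = new byte[end];
        System.arraycopy(workspace, 0, compressed, 0, end);
        return compressed;
    }
}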