Refine search
/**
 * Compresses all readable bytes of {@code in} into {@code out} as LZF-encoded
 * chunks, advancing the reader index of {@code in} past the consumed bytes.
 * Uses a recycled temporary array when the source buffer has no accessible
 * backing array.
 */
@Override protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) throws Exception {
    final int length = in.readableBytes();
    final int idx = in.readerIndex();
    final byte[] input;
    final int inputPtr;
    if (in.hasArray()) {
        // Heap buffer: compress straight out of the backing array, no copy.
        input = in.array();
        inputPtr = in.arrayOffset() + idx;
    } else {
        // Direct (or otherwise array-less) buffer: copy into a pooled scratch array.
        input = recycler.allocInputBuffer(length);
        in.getBytes(idx, input, 0, length);
        inputPtr = 0;
    }
    // Reserve worst-case output space before grabbing the backing array.
    final int maxOutputLength = LZFEncoder.estimateMaxWorkspaceSize(length);
    out.ensureWritable(maxOutputLength);
    // NOTE(review): assumes `out` is heap-backed — out.array() would throw for a
    // direct buffer. Confirm the channel's allocator guarantees heap buffers here.
    final byte[] output = out.array();
    final int outputPtr = out.arrayOffset() + out.writerIndex();
    final int outputLength = LZFEncoder.appendEncoded(encoder, input, inputPtr, length, output, outputPtr) - outputPtr;
    out.writerIndex(out.writerIndex() + outputLength);
    in.skipBytes(length);
    if (!in.hasArray()) {
        // Return the scratch copy to the recycler (only allocated in the array-less path).
        recycler.releaseInputBuffer(input);
    }
}
}
/**
 * Creates a new LZF decoder with the specified decoding instance.
 *
 * @param safeInstance
 *        If {@code true}, the decoder uses a {@link ChunkDecoder} built only on
 *        standard JDK access methods, which works on all Java platforms and JVMs.
 *        Otherwise it attempts to use a highly optimized {@link ChunkDecoder}
 *        implementation based on Sun JDK's {@link sun.misc.Unsafe} class
 *        (which other JDKs may also provide).
 */
public LzfDecoder(boolean safeInstance) {
    if (safeInstance) {
        decoder = ChunkDecoderFactory.safeInstance();
    } else {
        decoder = ChunkDecoderFactory.optimalInstance();
    }
    recycler = BufferRecycler.instance();
}
/**
 * Supplies a fresh {@code BufferRecycler}, logging the running allocation count.
 */
@Override
public BufferRecycler get() {
    final long allocationCount = counter.incrementAndGet();
    log.info("Allocating new bufferRecycler[%,d]", allocationCount);
    return new BufferRecycler();
}
}
/**
 * Convenience constructor: uses the platform-optimal {@code ChunkDecoder} and
 * the shared (thread-local) {@code BufferRecycler} instance.
 *
 * @param in Underlying input stream to use
 * @param fullReads Whether {@link #read(byte[])} should try to read exactly
 *    as many bytes as requested (true); or just however many happen to be
 *    available (false)
 */
public LZFInputStream(final InputStream in, boolean fullReads) throws IOException {
    this(ChunkDecoderFactory.optimalInstance(), in, BufferRecycler.instance(), fullReads);
}
/**
 * Creates a stream with explicit decoder and buffer-recycler instances.
 *
 * @param decoder chunk decoder implementation used to decompress data
 * @param in underlying stream the compressed bytes are read from
 * @param bufferRecycler recycler that provides (and later reclaims) work buffers
 * @param fullReads whether {@code read(byte[])} should try to fill the whole array
 */
public LZFInputStream(final ChunkDecoder decoder, final InputStream in, final BufferRecycler bufferRecycler, boolean fullReads) throws IOException {
    _cfgFullReads = fullReads;
    _decoder = decoder;
    _recycler = bufferRecycler;
    _inputStream = in;
    _inputStreamClosed = false;
    // Work buffers are pooled so they can be reused across stream instances.
    _inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
    _decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
/** * Creates a new LZF encoder with specified settings. * * @param safeInstance * If {@code true} encoder will use {@link ChunkEncoder} that only uses standard JDK access methods, * and should work on all Java platforms and JVMs. * Otherwise encoder will try to use highly optimized {@link ChunkEncoder} implementation that uses * Sun JDK's {@link sun.misc.Unsafe} class (which may be included by other JDK's as well). * @param totalLength * Expected total length of content to compress; only matters for outgoing messages that is smaller * than maximum chunk size (64k), to optimize encoding hash tables. */ public LzfEncoder(boolean safeInstance, int totalLength) { super(false); if (totalLength < MIN_BLOCK_TO_COMPRESS || totalLength > MAX_CHUNK_LEN) { throw new IllegalArgumentException("totalLength: " + totalLength + " (expected: " + MIN_BLOCK_TO_COMPRESS + '-' + MAX_CHUNK_LEN + ')'); } encoder = safeInstance ? ChunkEncoderFactory.safeNonAllocatingInstance(totalLength) : ChunkEncoderFactory.optimalNonAllocatingInstance(totalLength); recycler = BufferRecycler.instance(); }
public LZFCompressingInputStream(final ChunkEncoder encoder, InputStream in, BufferRecycler bufferRecycler) { // may be passed by caller, or could be null _encoder = encoder; _inputStream = in; if (bufferRecycler==null) { bufferRecycler = (encoder!=null) ? _encoder._recycler : BufferRecycler.instance(); } _recycler = bufferRecycler; _inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN); // let's not yet allocate encoding buffer; don't know optimal size }
/**
 * Returns both pooled buffers to the recycler and clears their references.
 * Safe against repeated invocation: each buffer is released at most once.
 */
@Override
protected void doClose() throws IOException {
    byte[] buf = inputBuffer;
    if (buf != null) {
        inputBuffer = null;
        recycler.releaseInputBuffer(buf);
    }
    buf = uncompressed;
    if (buf != null) {
        uncompressed = null;
        // BUG FIX: the original passed `uncompressed` here AFTER nulling it,
        // so the decode buffer was never actually returned to the recycler.
        recycler.releaseDecodeBuffer(buf);
    }
}
}
/**
 * Creates a compressed stream input that decodes LZF chunks read from {@code in}.
 *
 * @param in underlying stream supplying the compressed bytes
 * @param decoder chunk decoder implementation to use
 */
public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
    super(in);
    this.decoder = decoder;
    this.recycler = BufferRecycler.instance();
    // Both work buffers are sized for the largest possible LZF chunk.
    this.uncompressed = this.recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
    this.inputBuffer = this.recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
}
/**
 * Returns the input buffer to the recycler and closes the underlying stream.
 * Idempotent: repeated calls release and close at most once.
 */
private void _closeInput() throws IOException {
    final byte[] buffer = _inputBuffer;
    if (buffer != null) {
        _inputBuffer = null;
        _recycler.releaseInputBuffer(buffer);
    }
    if (_inputStreamClosed) {
        return;
    }
    _inputStreamClosed = true;
    _inputStream.close();
}
/**
 * Creates a GZIP input stream that reuses pooled buffers and a pooled
 * {@code Inflater}. Parses the GZIP header eagerly and primes the inflater
 * with whatever payload bytes are already buffered.
 *
 * @param in raw (compressed) input stream
 * @param bufferRecycler recycler providing the input buffer
 * @param gzipRecycler recycler providing the {@code Inflater} instance
 * @throws IOException if the header is invalid or the stream cannot be read
 */
public OptimizedGZIPInputStream(InputStream in, BufferRecycler bufferRecycler, GZIPRecycler gzipRecycler) throws IOException {
    super();
    _bufferRecycler = bufferRecycler;
    _gzipRecycler = gzipRecycler;
    _rawInput = in;
    _buffer = bufferRecycler.allocInputBuffer(INPUT_BUFFER_SIZE);
    _bufferPtr = _bufferEnd = 0;
    _inflater = gzipRecycler.allocInflater();
    _crc = new CRC32();
    // And then need to process header...
    _readHeader();
    _state = State.GZIP_CONTENT;
    // Header consumed; CRC now tracks only the decompressed content.
    _crc.reset();
    // and if all is good, kick start inflater etc
    if (_bufferPtr >= _bufferEnd) { // need more data
        _loadMore();
    }
    // Feed the inflater the payload bytes left over after header parsing.
    _inflater.setInput(_buffer, _bufferPtr, _bufferEnd-_bufferPtr);
}
/** * @param totalLength Total encoded length; used for calculating size * of hash table to use * @param bufferRecycler Buffer recycler instance, for usages where the * caller manages the recycler instances */ protected ChunkEncoder(int totalLength, BufferRecycler bufferRecycler) { // Need room for at most a single full chunk int largestChunkLen = Math.min(totalLength, LZFChunk.MAX_CHUNK_LEN); int suggestedHashLen = calcHashLen(largestChunkLen); _recycler = bufferRecycler; _hashTable = bufferRecycler.allocEncodingHash(suggestedHashLen); _hashModulo = _hashTable.length - 1; // Ok, then, what's the worst case output buffer length? // length indicator for each 32 literals, so: // 21-Feb-2013, tatu: Plus we want to prepend chunk header in place: int bufferLen = largestChunkLen + ((largestChunkLen + 31) >> 5) + LZFChunk.MAX_HEADER_LEN; _encodeBuffer = bufferRecycler.allocEncodingBuffer(bufferLen); }
/**
 * Lazily allocates the temporary decode buffer from the recycler, caching it
 * for subsequent calls.
 */
protected byte[] _getTmpBuffer() {
    byte[] buffer = _tmpBuffer;
    if (buffer == null) {
        buffer = _bufferRecycler.allocDecodeBuffer(INPUT_BUFFER_SIZE);
        _tmpBuffer = buffer;
    }
    return buffer;
}
_encodedBytes = _recycler.allocEncodingBuffer(bufferLen);
/**
 * Convenience constructor using the platform-optimal decoder and the shared
 * {@code BufferRecycler} instance.
 *
 * @param in Underlying input stream to use
 * @param fullReads Whether {@link #read(byte[])} should try to read exactly
 *    as many bytes as requested (true); or just however many happen to be
 *    available (false)
 */
public LZFInputStream(final InputStream in, boolean fullReads) throws IOException {
    this(ChunkDecoderFactory.optimalInstance(), in, BufferRecycler.instance(), fullReads);
}
/**
 * Full constructor taking explicit decoder and recycler.
 *
 * @param decoder decoder used to decompress incoming LZF chunks
 * @param in underlying compressed input stream
 * @param bufferRecycler provider of reusable work buffers
 * @param fullReads whether {@code read(byte[])} attempts to satisfy the full request
 */
public LZFInputStream(final ChunkDecoder decoder, final InputStream in, final BufferRecycler bufferRecycler, boolean fullReads) throws IOException {
    super();
    _decoder = decoder;
    _inputStream = in;
    _recycler = bufferRecycler;
    _cfgFullReads = fullReads;
    _inputStreamClosed = false;
    // Allocate maximum-chunk-sized buffers up front; released again on close().
    _inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
    _decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
/** * Creates a new LZF encoder with specified settings. * * @param safeInstance * If {@code true} encoder will use {@link ChunkEncoder} that only uses standard JDK access methods, * and should work on all Java platforms and JVMs. * Otherwise encoder will try to use highly optimized {@link ChunkEncoder} implementation that uses * Sun JDK's {@link sun.misc.Unsafe} class (which may be included by other JDK's as well). * @param totalLength * Expected total length of content to compress; only matters for outgoing messages that is smaller * than maximum chunk size (64k), to optimize encoding hash tables. */ public LzfEncoder(boolean safeInstance, int totalLength) { super(false); if (totalLength < MIN_BLOCK_TO_COMPRESS || totalLength > MAX_CHUNK_LEN) { throw new IllegalArgumentException("totalLength: " + totalLength + " (expected: " + MIN_BLOCK_TO_COMPRESS + '-' + MAX_CHUNK_LEN + ')'); } encoder = safeInstance ? ChunkEncoderFactory.safeNonAllocatingInstance(totalLength) : ChunkEncoderFactory.optimalNonAllocatingInstance(totalLength); recycler = BufferRecycler.instance(); }
/**
 * Creates a compressing input stream; {@code encoder} and {@code bufferRecycler}
 * may each be {@code null} (resolved or deferred as appropriate).
 *
 * @param encoder chunk encoder, or {@code null} to defer allocation
 * @param in underlying uncompressed input stream
 * @param bufferRecycler recycler, or {@code null} to derive one automatically
 */
public LZFCompressingInputStream(final ChunkEncoder encoder, InputStream in, BufferRecycler bufferRecycler) {
    _encoder = encoder;
    _inputStream = in;
    if (bufferRecycler == null) {
        // Reuse the encoder's recycler when available; else the shared instance.
        bufferRecycler = (encoder == null)
                ? BufferRecycler.instance()
                : _encoder._recycler;
    }
    _recycler = bufferRecycler;
    _inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
    // Encoding buffer is not allocated yet: the optimal size is still unknown.
}
/**
 * Releases both pooled buffers back to the recycler, resets position state and
 * closes the underlying stream. Safe to call more than once.
 */
@Override
public void close() throws IOException {
    _bufferPosition = 0;
    _bufferLength = 0;
    byte[] released = _inputBuffer;
    if (released != null) {
        _inputBuffer = null;
        _recycler.releaseInputBuffer(released);
    }
    released = _decodedBytes;
    if (released != null) {
        _decodedBytes = null;
        _recycler.releaseDecodeBuffer(released);
    }
    if (!_inputStreamClosed) {
        _inputStreamClosed = true;
        super.close();
    }
}
/**
 * Hands the input buffer back to the recycler, then closes the wrapped stream
 * unless it has already been closed.
 */
private void _closeInput() throws IOException {
    final byte[] toRelease = _inputBuffer;
    if (toRelease != null) {
        _inputBuffer = null;
        _recycler.releaseInputBuffer(toRelease);
    }
    if (!_inputStreamClosed) {
        _inputStreamClosed = true;
        _inputStream.close();
    }
}