@Override
public OutputStream get(int expectedSize) {
  buf = alloc.buffer(expectedSize);
  return new ByteBufOutputStream(buf);
}
@Override
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect)
    throws Exception {
  if (preferDirect) {
    return ctx.alloc().directBuffer(msg.readableBytes());
  } else {
    return ctx.alloc().buffer(msg.readableBytes());
  }
}
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
  if (cBuf.isReadable()) {
    byte[] b = new byte[cBuf.readableBytes()];
    cBuf.readBytes(b);
    cBuf.discardReadComponents();
    byte[] wrapped = saslClient.wrap(b, 0, b.length);
    ByteBuf buf = ctx.alloc().ioBuffer(4 + wrapped.length);
    buf.writeInt(wrapped.length);
    buf.writeBytes(wrapped);
    ctx.write(buf);
  }
  ctx.flush();
}
FanOutOneBlockAsyncDFSOutput(Configuration conf, FSUtils fsUtils, DistributedFileSystem dfs,
    DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
    LocatedBlock locatedBlock, Encryptor encryptor, List<Channel> datanodeList,
    DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.fsUtils = fsUtils;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
} else {
  final ByteBufAllocator alloc = alloc();
  if (alloc.isDirectBufferPooled()) {
    buffer = alloc.directBuffer(readableBytes);
  } else {
    buffer =
      org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil.threadLocalDirectBuffer();
  }
}
/**
 * Allocate a {@link ByteBuf} which will be used as argument of
 * {@link #encode(ChannelHandlerContext, I, ByteBuf)}.
 * Sub-classes may override this method to return a {@link ByteBuf} with a perfectly matching
 * {@code initialCapacity}.
 */
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") I msg,
        boolean preferDirect) throws Exception {
    if (preferDirect) {
        return ctx.alloc().ioBuffer();
    } else {
        return ctx.alloc().heapBuffer();
    }
}
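// Subclasses of MessageToByteEncoder can override allocateBuffer(...) to request an exactly
// sized buffer instead of the default ioBuffer()/heapBuffer(). A minimal, hypothetical sketch;
// the 4-byte length-prefix framing below is an assumption for illustration only, not taken from
// the surrounding source:
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;

public class LengthPrefixedEncoder extends MessageToByteEncoder<byte[]> {

    @Override
    protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, byte[] msg, boolean preferDirect)
            throws Exception {
        int capacity = 4 + msg.length; // exactly one int length field plus the payload
        return preferDirect ? ctx.alloc().ioBuffer(capacity) : ctx.alloc().heapBuffer(capacity);
    }

    @Override
    protected void encode(ChannelHandlerContext ctx, byte[] msg, ByteBuf out) throws Exception {
        out.writeInt(msg.length);
        out.writeBytes(msg);
    }
}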
private ByteBuf allocBuffer(int capacity) {
    return direct ? alloc().directBuffer(capacity) : alloc().heapBuffer(capacity);
}
/**
 * Compose {@code cumulation} and {@code next} into a new {@link CompositeByteBuf}.
 */
protected final ByteBuf composeIntoComposite(ByteBufAllocator alloc, ByteBuf cumulation,
        ByteBuf next) {
    // Create a composite buffer to accumulate this pair and potentially all the buffers
    // in the queue. Using +2 as we have already dequeued current and next.
    CompositeByteBuf composite = alloc.compositeBuffer(size() + 2);
    try {
        composite.addComponent(true, cumulation);
        composite.addComponent(true, next);
    } catch (Throwable cause) {
        composite.release();
        PlatformDependent.throwException(cause);
    }
    return composite;
}
@Override
public int calculateNewCapacity(int minNewCapacity, int maxCapacity) {
    return allocator.calculateNewCapacity(minNewCapacity, maxCapacity);
}
@Override
public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) {
    return allocator.compositeDirectBuffer(maxNumComponents);
}
private void endBlock() throws IOException {
  Preconditions.checkState(waitingAckQueue.isEmpty(),
    "should call flush first before calling close");
  if (state != State.STREAMING) {
    throw new IOException("stream already broken");
  }
  state = State.CLOSING;
  long finalizedLength = ackedBlockLength;
  PacketHeader header = new PacketHeader(4, finalizedLength, nextPacketSeqno, true, 0, false);
  buf.release();
  buf = null;
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.directBuffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);
  CompletableFuture<Long> future = new CompletableFuture<>();
  waitingAckQueue.add(new Callback(future, finalizedLength, datanodeList));
  datanodeList.forEach(ch -> ch.writeAndFlush(headerBuf.retainedDuplicate()));
  headerBuf.release();
  try {
    future.get();
  } catch (InterruptedException e) {
    throw (IOException) new InterruptedIOException().initCause(e);
  } catch (ExecutionException e) {
    Throwable cause = e.getCause();
    Throwables.propagateIfPossible(cause, IOException.class);
    throw new IOException(cause);
  }
}
/**
 * Returns an off-heap copy of the specified {@link ByteBuf}, and releases the original one.
 * Note that this method does not create an off-heap copy if the allocation / deallocation cost
 * is too high, but just returns the original {@link ByteBuf}.
 */
protected final ByteBuf newDirectBuffer(ByteBuf buf) {
    final int readableBytes = buf.readableBytes();
    if (readableBytes == 0) {
        ReferenceCountUtil.safeRelease(buf);
        return Unpooled.EMPTY_BUFFER;
    }

    final ByteBufAllocator alloc = alloc();
    if (alloc.isDirectBufferPooled()) {
        ByteBuf directBuf = alloc.directBuffer(readableBytes);
        directBuf.writeBytes(buf, buf.readerIndex(), readableBytes);
        ReferenceCountUtil.safeRelease(buf);
        return directBuf;
    }

    final ByteBuf directBuf = ByteBufUtil.threadLocalDirectBuffer();
    if (directBuf != null) {
        directBuf.writeBytes(buf, buf.readerIndex(), readableBytes);
        ReferenceCountUtil.safeRelease(buf);
        return directBuf;
    }

    // Allocating and deallocating an unpooled direct buffer is very expensive; give up.
    return buf;
}
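// For context, a caller of newDirectBuffer(...) typically sits on the outbound path and converts
// heap buffers to direct ones before they are queued for the socket. A simplified sketch, loosely
// modeled on Netty's AbstractNioByteChannel#filterOutboundMessage within the same channel class
// (the unsupported-type handling here is an assumption for brevity):
protected Object filterOutboundMessage(Object msg) {
    if (msg instanceof ByteBuf) {
        ByteBuf buf = (ByteBuf) msg;
        if (buf.isDirect()) {
            return msg; // already off-heap, nothing to copy
        }
        // Heap buffer: copy to a direct buffer only when it is cheap to do so.
        return newDirectBuffer(buf);
    }
    throw new UnsupportedOperationException(
        "unsupported message type: " + msg.getClass().getSimpleName());
}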
private ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect,
        boolean allowEmptyReturn) {
    int targetBufSize = 0;
    int remaining = msg.readableBytes() + buffer.readableBytes();

    // quick overflow check
    if (remaining < 0) {
        throw new EncoderException("too much data to allocate a buffer for compression");
    }

    while (remaining > 0) {
        int curSize = Math.min(blockSize, remaining);
        remaining -= curSize;
        // calculate the total compressed size of the current block (including header) and
        // add it to the total
        targetBufSize += compressor.maxCompressedLength(curSize) + HEADER_LENGTH;
    }

    // In addition to the raw byte count, each block (of size #blockSize) also contributes a
    // header of HEADER_LENGTH bytes to targetBufSize. Since that sum can never wrap around
    // back to >= 0 once it overflows, a negative value is a reliable sign of overflow.
    if (targetBufSize > maxEncodeSize || 0 > targetBufSize) {
        throw new EncoderException(String.format("requested encode buffer size (%d bytes) exceeds "
            + "the maximum allowable size (%d bytes)", targetBufSize, maxEncodeSize));
    }

    if (allowEmptyReturn && targetBufSize < blockSize) {
        return Unpooled.EMPTY_BUFFER;
    }

    if (preferDirect) {
        return ctx.alloc().ioBuffer(targetBufSize, targetBufSize);
    } else {
        return ctx.alloc().heapBuffer(targetBufSize, targetBufSize);
    }
}
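// Rough worked example of the sizing loop above (block size and input size are assumed values,
// purely for illustration): with blockSize = 64 KiB and 138 KiB of pending input, the loop runs
// three times (64 KiB, 64 KiB, 10 KiB), so
//   targetBufSize = maxCompressedLength(64 KiB) + HEADER_LENGTH
//                 + maxCompressedLength(64 KiB) + HEADER_LENGTH
//                 + maxCompressedLength(10 KiB) + HEADER_LENGTH
// i.e. every block is budgeted at its worst-case compressed size plus one block header.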
@Override
public ByteBuf copy(int index, int length) {
    ensureAccessible();
    ByteBuffer src;
    try {
        src = (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length);
    } catch (IllegalArgumentException ignored) {
        throw new IndexOutOfBoundsException("Too many bytes to read - Need " + (index + length));
    }

    ByteBuf dst = src.isDirect() ? alloc().directBuffer(length) : alloc().heapBuffer(length);
    dst.writeBytes(src);
    return dst;
}
@Override
public ByteBuf cumulate(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf in) {
    ByteBuf buffer;
    if (cumulation.refCnt() > 1) {
        // Expand the cumulation (by replacing it) when the refCnt is greater than 1, which may
        // happen when the user uses slice().retain() or duplicate().retain().
        //
        // See:
        // - https://github.com/netty/netty/issues/2327
        // - https://github.com/netty/netty/issues/1764
        buffer = expandCumulation(alloc, cumulation, in.readableBytes());
        buffer.writeBytes(in);
        in.release();
    } else {
        CompositeByteBuf composite;
        if (cumulation instanceof CompositeByteBuf) {
            composite = (CompositeByteBuf) cumulation;
        } else {
            composite = alloc.compositeBuffer(Integer.MAX_VALUE);
            composite.addComponent(true, cumulation);
        }
        composite.addComponent(true, in);
        buffer = composite;
    }
    return buffer;
}
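// expandCumulation(...) is referenced above but not shown here. A minimal sketch of what such a
// helper has to do (allocate a larger buffer, copy over the old cumulation, release it); this is
// an illustration of the contract, not necessarily the exact upstream implementation:
static ByteBuf expandCumulation(ByteBufAllocator alloc, ByteBuf cumulation, int readable) {
    ByteBuf oldCumulation = cumulation;
    // New buffer large enough for the existing bytes plus the incoming ones.
    cumulation = alloc.buffer(oldCumulation.readableBytes() + readable);
    cumulation.writeBytes(oldCumulation);
    oldCumulation.release();
    return cumulation;
}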
final void ensureWritable0(int minWritableBytes) {
    ensureAccessible();
    if (minWritableBytes <= writableBytes()) {
        return;
    }

    if (minWritableBytes > maxCapacity - writerIndex) {
        throw new IndexOutOfBoundsException(String.format(
            "writerIndex(%d) + minWritableBytes(%d) exceeds maxCapacity(%d): %s",
            writerIndex, minWritableBytes, maxCapacity, this));
    }

    // Normalize the current capacity to the power of 2.
    int newCapacity = alloc().calculateNewCapacity(writerIndex + minWritableBytes, maxCapacity);

    // Adjust to the new capacity.
    capacity(newCapacity);
}
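// A small, self-contained sketch of the growth path above: writing past the current capacity
// routes through ensureWritable0(...), which asks the allocator for a normalized new capacity
// and then resizes the buffer. The allocator and sizes below are assumptions for illustration.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;

public final class CapacityGrowthDemo {
    public static void main(String[] args) {
        ByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
        ByteBuf buf = alloc.buffer(16, 1024);  // initial capacity 16, max capacity 1024
        buf.writeBytes(new byte[100]);         // needs more room -> capacity is recalculated
        System.out.println(buf.capacity());    // a normalized value >= 100 (128 with the default policy)
        buf.release();
    }
}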
@Override
public CompositeByteBuf compositeDirectBuffer() {
    return allocator.compositeDirectBuffer();
}
private void writeResponse(ChannelHandlerContext ctx, byte[] response) {
  LOG.trace("Sending token size={} from initSASLContext.", response.length);
  ctx.writeAndFlush(
    ctx.alloc().buffer(4 + response.length).writeInt(response.length).writeBytes(response));
}
int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
int checksumLen = numChecks * summer.getChecksumSize();
ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
checksumBuf.writerIndex(checksumLen);
PacketHeader header = new PacketHeader(4 + dataLen + checksumLen, nextPacketOffsetInBlock,
  nextPacketSeqno, false, dataLen, syncBlock);
int headerLen = header.getSerializedSize();
ByteBuf headerBuf = alloc.buffer(headerLen);
header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
headerBuf.writerIndex(headerLen);
@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
  ByteBuf inBuf;
  boolean release = false;
  if (msg.nioBufferCount() == 1) {
    inBuf = msg;
  } else {
    inBuf = ctx.alloc().directBuffer(msg.readableBytes());
    msg.readBytes(inBuf);
    release = true;
  }
  ByteBuffer inBuffer = inBuf.nioBuffer();
  ByteBuf outBuf = ctx.alloc().directBuffer(inBuf.readableBytes());
  ByteBuffer outBuffer = outBuf.nioBuffer(0, inBuf.readableBytes());
  decryptor.decrypt(inBuffer, outBuffer);
  outBuf.writerIndex(inBuf.readableBytes());
  if (release) {
    inBuf.release();
  }
  ctx.fireChannelRead(outBuf);
}