/**
 * Wraps {@code mesg} in an SSE event (preamble + payload + terminator) and flushes it
 * to the channel. Takes ownership of {@code mesg} and releases it after copying.
 *
 * @param ctx  handler context whose allocator and channel are used
 * @param mesg event payload; released by this method
 * @return the future of the write-and-flush operation
 */
@Override
public ChannelFuture sendPushMessage(ChannelHandlerContext ctx, ByteBuf mesg) {
    // Bug fix: the original reserved mesg.writableBytes() — the *remaining capacity*
    // of the source buffer — instead of mesg.readableBytes(), the number of bytes
    // actually copied. Size the whole frame up front instead.
    final int payloadLength = mesg.readableBytes();
    final ByteBuf newBuff = ctx.alloc().buffer(
            SSE_PREAMBLE.length() + payloadLength + SSE_TERMINATION.length());
    newBuff.writeCharSequence(SSE_PREAMBLE, Charsets.UTF_8);
    newBuff.writeBytes(mesg);
    newBuff.writeCharSequence(SSE_TERMINATION, Charsets.UTF_8);
    mesg.release();
    return ctx.channel().writeAndFlush(newBuff);
}
@Override protected final ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) throws Exception { int sizeEstimate = (int) Math.ceil(msg.readableBytes() * 1.001) + 12; if (writeHeader) { switch (wrapper) { case GZIP: sizeEstimate += gzipHeader.length; break; case ZLIB: sizeEstimate += 2; // first two magic bytes break; default: // no op } } return ctx.alloc().heapBuffer(sizeEstimate); }
/**
 * Copies {@code capacity} bytes from the readable region of {@code buf} into a freshly
 * allocated direct buffer, then releases {@code holder} (the reference-counted owner).
 */
private static ByteBuf newDirectBuffer0(Object holder, ByteBuf buf, ByteBufAllocator alloc, int capacity) {
    final ByteBuf copy = alloc.directBuffer(capacity);
    copy.writeBytes(buf, buf.readerIndex(), capacity);
    ReferenceCountUtil.safeRelease(holder);
    return copy;
}
/**
 * Allocate a {@link ByteBuf} which will be used as argument of
 * {@link #encode(ChannelHandlerContext, I, ByteBuf)}.
 * Sub-classes may override this method to return {@link ByteBuf} with a perfect
 * matching {@code initialCapacity}.
 */
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") I msg,
                                 boolean preferDirect) throws Exception {
    return preferDirect ? ctx.alloc().ioBuffer() : ctx.alloc().heapBuffer();
}
/** * Returns an off-heap copy of the specified {@link ByteBuf}, and releases the original one. * Note that this method does not create an off-heap copy if the allocation / deallocation cost is too high, * but just returns the original {@link ByteBuf}.. */ protected final ByteBuf newDirectBuffer(ByteBuf buf) { final int readableBytes = buf.readableBytes(); if (readableBytes == 0) { ReferenceCountUtil.safeRelease(buf); return Unpooled.EMPTY_BUFFER; } final ByteBufAllocator alloc = alloc(); if (alloc.isDirectBufferPooled()) { ByteBuf directBuf = alloc.directBuffer(readableBytes); directBuf.writeBytes(buf, buf.readerIndex(), readableBytes); ReferenceCountUtil.safeRelease(buf); return directBuf; } final ByteBuf directBuf = ByteBufUtil.threadLocalDirectBuffer(); if (directBuf != null) { directBuf.writeBytes(buf, buf.readerIndex(), readableBytes); ReferenceCountUtil.safeRelease(buf); return directBuf; } // Allocating and deallocating an unpooled direct buffer is very expensive; give up. return buf; }
/**
 * Forwards the payload of {@code proxyMessage} to the channel stored in the
 * {@code Constants.NEXT_CHANNEL} attribute, if one is attached; otherwise drops it.
 */
private void handleTransferMessage(ChannelHandlerContext ctx, ProxyMessage proxyMessage) {
    final Channel realServerChannel = ctx.channel().attr(Constants.NEXT_CHANNEL).get();
    if (realServerChannel == null) {
        return;
    }
    final byte[] data = proxyMessage.getData();
    final ByteBuf buf = ctx.alloc().buffer(data.length);
    buf.writeBytes(data);
    logger.debug("write data to real server, {}", realServerChannel);
    realServerChannel.writeAndFlush(buf);
}
/**
 * Encodes a PostgreSQL packet: the message body is first written into a scratch
 * payload buffer so its length is known, then the type byte and 4-byte length
 * prefix are emitted ahead of it. The header is skipped for
 * {@code PostgreSQLSSLNegativePacket} (written without type/length framing).
 * The try-with-resources closes the payload, releasing the scratch buffer.
 */
@Override protected void doEncode(final ChannelHandlerContext context, final PostgreSQLPacket message, final ByteBuf out) {
    try (PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(context.alloc().buffer())) {
        message.write(payload);
        if (!(message instanceof PostgreSQLSSLNegativePacket)) {
            out.writeByte(message.getMessageType());
            // Length field counts the payload plus its own size (PAYLOAD_LENGTH).
            out.writeInt(payload.getByteBuf().readableBytes() + message.PAYLOAD_LENGTH);
        }
        out.writeBytes(payload.getByteBuf());
    }
}
}
checkNotNull(buf, "buf"); checkNotNull(charset, "charset"); final int maxIndex = buf.readerIndex() + buf.readableBytes(); if (index < 0 || length < 0 || index > maxIndex - length) { throw new IndexOutOfBoundsException("index: " + index + " length: " + length); return isUtf8(buf, index, length); } else if (charset.equals(CharsetUtil.US_ASCII)) { return isAscii(buf, index, length); } else { CharsetDecoder decoder = CharsetUtil.decoder(charset, CodingErrorAction.REPORT, CodingErrorAction.REPORT); try { if (buf.nioBufferCount() == 1) { decoder.decode(buf.nioBuffer(index, length)); } else { ByteBuf heapBuffer = buf.alloc().heapBuffer(length); try { heapBuffer.writeBytes(buf, index, length);
/**
 * Replaces {@code cumulation} with a larger buffer sized to hold {@code readable}
 * additional bytes, copying over the existing readable content and releasing the
 * old buffer.
 */
static ByteBuf expandCumulation(ByteBufAllocator alloc, ByteBuf cumulation, int readable) {
    final ByteBuf old = cumulation;
    final ByteBuf grown = alloc.buffer(old.readableBytes() + readable);
    grown.writeBytes(old);
    old.release();
    return grown;
}
/**
 * Flushes any buffered data and writes the stream footer (a terminating block
 * header with zero lengths and checksum), then flushes the channel.
 * Idempotent: subsequent calls succeed immediately without writing again.
 */
private ChannelFuture finishEncode(final ChannelHandlerContext ctx, ChannelPromise promise) {
    if (finished) {
        promise.setSuccess();
        return promise;
    }
    finished = true;
    // Footer buffer must hold any still-buffered data (compressed worst case)
    // plus one final block header.
    final ByteBuf footer = ctx.alloc().heapBuffer(
            compressor.maxCompressedLength(buffer.readableBytes()) + HEADER_LENGTH);
    flushBufferedData(footer);
    final int idx = footer.writerIndex();
    // Terminating block: magic number and token, with compressed/decompressed
    // lengths and checksum all zero, marks end-of-stream.
    footer.setLong(idx, MAGIC_NUMBER);
    footer.setByte(idx + TOKEN_OFFSET, (byte) (BLOCK_TYPE_NON_COMPRESSED | compressionLevel));
    footer.setInt(idx + COMPRESSED_LENGTH_OFFSET, 0);
    footer.setInt(idx + DECOMPRESSED_LENGTH_OFFSET, 0);
    footer.setInt(idx + CHECKSUM_OFFSET, 0);
    // setX calls above do not advance the writer index; do it explicitly.
    footer.writerIndex(idx + HEADER_LENGTH);
    return ctx.writeAndFlush(footer, promise);
}
H m = (H) msg; buf = ctx.alloc().buffer((int) headersEncodedSizeAccumulator); ByteBufUtil.writeShortBE(buf, CRLF_SHORT); headersEncodedSizeAccumulator = HEADERS_WEIGHT_NEW * padSizeForAccumulation(buf.readableBytes()) + HEADERS_WEIGHT_HISTORICAL * headersEncodedSizeAccumulator; if (!potentialEmptyBuf.isReadable()) { out.add(potentialEmptyBuf.retain()); return;
/**
 * Compose {@code cumulation} and {@code next} into a new {@link ByteBufAllocator#ioBuffer()}.
 * @param alloc The allocator to use to allocate the new buffer.
 * @param cumulation The current cumulation.
 * @param next The next buffer.
 * @return The result of {@code cumulation + next}.
 */
protected final ByteBuf copyAndCompose(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf next) {
    final ByteBuf merged = alloc.ioBuffer(cumulation.readableBytes() + next.readableBytes());
    try {
        merged.writeBytes(cumulation);
        merged.writeBytes(next);
    } catch (Throwable cause) {
        // Copy failed: free the half-built buffer and the pending input, then rethrow.
        merged.release();
        safeRelease(next);
        throwException(cause);
    }
    cumulation.release();
    next.release();
    return merged;
}
/**
 * Encodes a MySQL packet: the body is first written into a scratch payload buffer
 * so its length is known, then the 3-byte little-endian length and 1-byte sequence
 * id header are emitted ahead of it. The try-with-resources closes the payload,
 * releasing the scratch buffer.
 */
@Override protected void doEncode(final ChannelHandlerContext context, final MySQLPacket message, final ByteBuf out) {
    try (MySQLPacketPayload payload = new MySQLPacketPayload(context.alloc().buffer())) {
        message.write(payload);
        out.writeMediumLE(payload.getByteBuf().readableBytes());
        out.writeByte(message.getSequenceId());
        out.writeBytes(payload.getByteBuf());
    }
}
}
/**
 * Writes a SETTINGS frame with the ACK flag set and an empty payload on stream 0.
 * Any failure is reported through the promise rather than thrown.
 */
@Override
public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) {
    try {
        final ByteBuf frame = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
        // Header only: payload length 0, stream id 0, ACK flag carried in the flags byte.
        writeFrameHeaderInternal(frame, 0, SETTINGS, new Http2Flags().ack(true), 0);
        return ctx.write(frame, promise);
    } catch (Throwable t) {
        return promise.setFailure(t);
    }
}
private ChannelFuture finishEncode(final ChannelHandlerContext ctx, ChannelPromise promise) { if (finished) { promise.setSuccess(); return promise; ByteBuf footer = ctx.alloc().heapBuffer(); if (writeHeader && wrapper == ZlibWrapper.GZIP) { footer.writeBytes(gzipHeader); if (!footer.isWritable()) { ctx.write(footer); footer = ctx.alloc().heapBuffer(); int crcValue = (int) crc.getValue(); int uncBytes = deflater.getTotalIn(); footer.writeByte(crcValue); footer.writeByte(crcValue >>> 8); footer.writeByte(crcValue >>> 16);
/**
 * Writes and flushes the SSE ping/heartbeat sequence as UTF-8.
 */
@Override
public ChannelFuture sendPing(ChannelHandlerContext ctx) {
    final ByteBuf ping = ctx.alloc().buffer();
    ping.ensureWritable(SSE_PING.length());
    ping.writeCharSequence(SSE_PING, Charsets.UTF_8);
    return ctx.channel().writeAndFlush(ping);
}
ByteBuf buffer; try { if (cumulation.refCnt() > 1) { buffer = expandCumulation(alloc, cumulation, in.readableBytes()); buffer.writeBytes(in); } else { CompositeByteBuf composite; composite = (CompositeByteBuf) cumulation; } else { composite = alloc.compositeBuffer(Integer.MAX_VALUE); composite.addComponent(true, cumulation);
/**
 * Serializes this object to a byte array by delegating to {@link #encode(ByteBuf)}
 * on a temporary heap buffer, which is always released.
 */
@Override
public byte[] encode() {
    final ByteBuf scratch = connection.getChannel().alloc().heapBuffer();
    try {
        encode(scratch);
        final byte[] result = new byte[scratch.readableBytes()];
        scratch.readBytes(result);
        return result;
    } finally {
        scratch.release();
    }
}
/**
 * Broadcasts {@code payload} under {@code subject} as a length-prefixed datagram
 * to the multicast group address; no-op when broadcasting is disabled.
 */
@Override
public void broadcast(String subject, byte[] payload) {
    if (!enabled) {
        return;
    }
    final Message message = new Message(subject, payload);
    final byte[] encoded = SERIALIZER.encode(message);
    // Frame: 4-byte length prefix followed by the serialized message bytes.
    final ByteBuf buf = serverChannel.alloc().buffer(4 + encoded.length);
    buf.writeInt(encoded.length);
    buf.writeBytes(encoded);
    serverChannel.writeAndFlush(new DatagramPacket(buf, groupAddress));
}
/**
 * @see <a href="https://github.com/logstash-plugins/logstash-input-beats/blob/master/PROTOCOL.md#ack-frame-type">'ack' frame type</a>
 */
private void sendACK(Channel channel) throws IOException {
    // Only acknowledge once the full window has been consumed.
    if (sequenceNum != windowSize) {
        return;
    }
    // ACK frame: version byte + frame-type byte + 4-byte sequence number = 6 bytes.
    final ByteBuf ack = channel.alloc().buffer(6);
    ack.writeByte(PROTOCOL_VERSION);
    ack.writeByte(FRAME_ACK);
    ack.writeInt((int) sequenceNum);
    LOG.trace("Sending ACK for sequence number {} on channel {}", sequenceNum, channel);
    channel.writeAndFlush(ack);
}