/**
 * Allocates a fresh buffer of {@code maxBufferSize} bytes from this channel's allocator.
 */
private ByteBuf allocateBuffer()
{
    return channel.alloc().buffer( maxBufferSize );
}
/**
 * Removes a {@link ByteBuf} holding up to {@code bytes} readable bytes from the queue.
 * Any enqueued buffer whose bytes are fully consumed during removal has its promise
 * completed when the passed aggregate {@link ChannelPromise} completes.
 *
 * @param bytes the maximum number of readable bytes in the returned {@link ByteBuf};
 *              if {@code bytes} exceeds {@link #readableBytes} a buffer of length
 *              {@link #readableBytes} is returned.
 * @param aggregatePromise used to aggregate the promises and listeners for the constituent buffers.
 * @return a {@link ByteBuf} composed of the enqueued buffers.
 */
public ByteBuf remove(int bytes, ChannelPromise aggregatePromise) {
    // Delegate to the allocator-aware overload using this channel's allocator.
    return remove(channel.alloc(), bytes, aggregatePromise);
}
/**
 * Sends an ACK frame to the peer once a full window of frames has been received
 * (i.e. when the current sequence number reaches the window size).
 *
 * @param channel the channel to write the ACK frame to
 * @throws IOException declared for callers; no I/O exception is raised directly here
 * @see <a href="https://github.com/logstash-plugins/logstash-input-beats/blob/master/PROTOCOL.md#ack-frame-type">'ack' frame type</a>
 */
private void sendACK(Channel channel) throws IOException {
    if (sequenceNum != windowSize) {
        return;
    }
    // ACK frame layout: 1 byte version, 1 byte frame type, 4 byte sequence number.
    final ByteBuf ack = channel.alloc().buffer(6);
    ack.writeByte(PROTOCOL_VERSION);
    ack.writeByte(FRAME_ACK);
    ack.writeInt((int) sequenceNum);
    LOG.trace("Sending ACK for sequence number {} on channel {}", sequenceNum, channel);
    channel.writeAndFlush(ack);
}
/**
 * Removes a {@link ByteBuf} containing at most {@code bytes} readable bytes from the queue.
 * Added buffers whose bytes are fully consumed by this removal have their promises
 * completed when the supplied aggregate {@link ChannelPromise} completes.
 *
 * @param bytes upper bound on readable bytes in the result; capped at {@link #readableBytes}.
 * @param aggregatePromise aggregates the promises and listeners of the constituent buffers.
 * @return a {@link ByteBuf} composed of the enqueued buffers.
 */
public ByteBuf remove(int bytes, ChannelPromise aggregatePromise) {
    return remove(channel.alloc(), bytes, aggregatePromise);
}
/**
 * Dequeues a {@link ByteBuf} of up to {@code bytes} readable bytes, using this
 * channel's allocator. Buffers fully drained by this call have their promises
 * completed alongside the given aggregate {@link ChannelPromise}.
 *
 * @param bytes maximum readable bytes to return; values above {@link #readableBytes}
 *              yield a buffer of {@link #readableBytes} length instead.
 * @param aggregatePromise collects the promises and listeners of the removed buffers.
 * @return a {@link ByteBuf} composed of the enqueued buffers.
 */
public ByteBuf remove(int bytes, ChannelPromise aggregatePromise) {
    return remove(channel.alloc(), bytes, aggregatePromise);
}
/**
 * Returns the allocator of the underlying channel, falling back to the shared
 * pooled default when no channel is currently associated.
 */
@Override
public ByteBufAllocator alloc() {
    final Channel ch = channel();
    if (ch == null) {
        return PooledByteBufAllocator.DEFAULT;
    }
    return ch.alloc();
}
}
/**
 * Builds the server-side TLS handler for the given channel from the supplied context.
 */
private ChannelHandler nettyServerHandler( Channel channel, SslContext sslContext )
{
    final SSLEngine engine = sslContext.newEngine( channel.alloc() );
    return new SslHandler( engine );
}
/**
 * Serializes this object into a standalone byte array.
 * A temporary heap buffer is used for encoding and always released afterwards.
 */
@Override
public byte[] encode() {
    final ByteBuf scratch = connection.getChannel().alloc().heapBuffer();
    try {
        encode(scratch);
        final byte[] result = new byte[scratch.readableBytes()];
        scratch.readBytes(result);
        return result;
    } finally {
        // Release the temporary buffer even if encode(...) throws.
        scratch.release();
    }
}
/**
 * Broadcasts a length-prefixed, serialized message to the multicast group,
 * doing nothing when broadcasting is disabled.
 */
@Override
public void broadcast(String subject, byte[] payload) {
    if (!enabled) {
        return;
    }
    final byte[] encoded = SERIALIZER.encode(new Message(subject, payload));
    // Frame layout: 4-byte length prefix followed by the serialized message.
    final ByteBuf frame = serverChannel.alloc().buffer(4 + encoded.length);
    frame.writeInt(encoded.length);
    frame.writeBytes(encoded);
    serverChannel.writeAndFlush(new DatagramPacket(frame, groupAddress));
}
/**
 * Configures the channel pipeline: TLS first, then chunked-write support,
 * then the stream-writing handler.
 */
@Override
protected void initChannel(Channel ch) throws Exception {
    final ChannelPipeline p = ch.pipeline();
    p.addLast(new SslHandler(sslCtx.newEngine(ch.alloc())));
    p.addLast(new ChunkedWriteHandler());
    p.addLast(new WriteStreamHandler());
}
/**
 * Assembles the origin-connection pipeline. Handler order matters: the TLS
 * handler (when the pool is secure) must precede the HTTP codec, and the
 * lifecycle/connection-pool handlers come last.
 */
@Override
protected void initChannel(Channel ch) throws Exception {
    final ChannelPipeline p = ch.pipeline();
    p.addLast(new PassportStateOriginHandler());
    if (connectionPoolConfig.isSecure()) {
        p.addLast("ssl", sslContext.newHandler(ch.alloc()));
    }
    p.addLast(HTTP_CODEC_HANDLER_NAME, new HttpClientCodec(
            BaseZuulChannelInitializer.MAX_INITIAL_LINE_LENGTH.get(),
            BaseZuulChannelInitializer.MAX_HEADER_SIZE.get(),
            BaseZuulChannelInitializer.MAX_CHUNK_SIZE.get(),
            false,
            false
    ));
    p.addLast(PassportStateHttpClientHandler.PASSPORT_STATE_HTTP_CLIENT_HANDLER_NAME,
            new PassportStateHttpClientHandler());
    p.addLast("originNettyLogger", nettyLogger);
    p.addLast(httpMetricsHandler);
    addMethodBindingHandler(p);
    p.addLast("httpLifecycle", new HttpClientLifecycleChannelHandler());
    p.addLast("connectionPoolHandler", connectionPoolHandler);
}
@Override public ActiveMQBuffer createTransportBuffer(final int size) { try { return new ChannelBufferWrapper(channel.alloc().directBuffer(size), true); } catch (OutOfMemoryError oom) { final long totalPendingWriteBytes = batchBufferSize(this.channel, this.writeBufferHighWaterMark); // I'm not using the ActiveMQLogger framework here, as I wanted the class name to be very specific here logger.warn("Trying to allocate " + size + " bytes, System is throwing OutOfMemoryError on NettyConnection " + this + ", there are currently " + "pendingWrites: [NETTY] -> " + totalPendingWriteBytes + "[EVENT LOOP] -> " + pendingWritesOnEventLoopView.get() + " causes: " + oom.getMessage(), oom); throw oom; } }
/**
 * Installs a TLS handler at the head of the pipeline, honoring the
 * configured {@code startTls} mode.
 */
@Override
protected void initChannel(Channel ch) throws Exception {
    final SSLEngine sslEngine = context.newEngine(ch.alloc());
    ch.pipeline().addFirst("ssl", new SslHandler(sslEngine, startTls));
}
}
/**
 * Encodes this packet into a {@link DatagramPacket} addressed to its sender.
 * Heartbeats need only a single byte; all other commands use header plus body.
 */
@Override
public Object toFrame(Channel channel) {
    final int capacity;
    if (cmd == HEARTBEAT.cmd) {
        capacity = 1;
    } else {
        capacity = HEADER_LEN + getBodyLength();
    }
    // Fixed-capacity buffer: initial and maximum capacity are identical.
    final ByteBuf out = channel.alloc().buffer(capacity, capacity);
    encodePacket(this, out);
    return new DatagramPacket(out, sender());
}
}
/**
 * Sets up the pipeline: TLS at the head, then an HTTP codec chosen by
 * whether this endpoint acts as a client or a server.
 */
@Override
protected void initChannel(Channel ch) throws Exception {
    final ChannelPipeline p = ch.pipeline();
    p.addFirst("ssl", new SslHandler(context.newEngine(ch.alloc())));
    if (isClient) {
        p.addLast("codec", new HttpClientCodec());
    } else {
        p.addLast("codec", new HttpServerCodec());
    }
}
}
@Override protected void initChannel(Channel ch) throws Exception { SslHandler sslHandler = sslContext.newHandler(ch.alloc()); sslHandler.engine().setEnabledProtocols(sslContextFactory.getProtocols()); // Configure our pipeline of ChannelHandlerS. ChannelPipeline pipeline = ch.pipeline(); storeChannel(ch); addTimeoutHandlers(pipeline); addPassportHandler(pipeline); addTcpRelatedHandlers(pipeline); pipeline.addLast("ssl", sslHandler); addSslInfoHandlers(pipeline, isSSlFromIntermediary); addSslClientCertChecks(pipeline); addHttp1Handlers(pipeline); addHttpRelatedHandlers(pipeline); addZuulHandlers(pipeline); } }
/**
 * Creates and configures a TLS handler whose engine is produced by the
 * factory from the pipeline channel's allocator.
 */
@Override
protected SslHandler newSslHandler(ChannelPipeline pipeline) {
    final SslHandler handler = new SslHandler(engineFactory.call(pipeline.channel().alloc()));
    configureHandler(handler);
    return handler;
}
/**
 * Converts the given {@link HttpData} into a {@link ByteBuf}, reusing the
 * backing buffer when the data already wraps one, otherwise copying it into
 * a fixed-size direct buffer.
 */
protected final ByteBuf toByteBuf(HttpData data) {
    if (data instanceof ByteBufHolder) {
        return ((ByteBufHolder) data).content();
    }
    final int length = data.length();
    final ByteBuf buf = channel().alloc().directBuffer(length, length);
    buf.writeBytes(data.array(), data.offset(), length);
    return buf;
}
}
/**
 * Initializes the origin channel's pipeline. Ordering is significant:
 * TLS (for secure pools) is installed before the HTTP codec, with the
 * lifecycle and connection-pool handlers appended at the tail.
 */
@Override
protected void initChannel(Channel ch) throws Exception {
    final ChannelPipeline pipeline = ch.pipeline();
    pipeline.addLast(new PassportStateOriginHandler());
    if (connectionPoolConfig.isSecure()) {
        pipeline.addLast("ssl", sslContext.newHandler(ch.alloc()));
    }
    final HttpClientCodec codec = new HttpClientCodec(
            BaseZuulChannelInitializer.MAX_INITIAL_LINE_LENGTH.get(),
            BaseZuulChannelInitializer.MAX_HEADER_SIZE.get(),
            BaseZuulChannelInitializer.MAX_CHUNK_SIZE.get(),
            false,
            false
    );
    pipeline.addLast(HTTP_CODEC_HANDLER_NAME, codec);
    pipeline.addLast(PassportStateHttpClientHandler.PASSPORT_STATE_HTTP_CLIENT_HANDLER_NAME,
            new PassportStateHttpClientHandler());
    pipeline.addLast("originNettyLogger", nettyLogger);
    pipeline.addLast(httpMetricsHandler);
    addMethodBindingHandler(pipeline);
    pipeline.addLast("httpLifecycle", new HttpClientLifecycleChannelHandler());
    pipeline.addLast("connectionPoolHandler", connectionPoolHandler);
}
@Override protected void initChannel(Channel ch) throws Exception { SslHandler sslHandler = sslContext.newHandler(ch.alloc()); sslHandler.engine().setEnabledProtocols(sslContextFactory.getProtocols()); // Configure our pipeline of ChannelHandlerS. ChannelPipeline pipeline = ch.pipeline(); storeChannel(ch); addTimeoutHandlers(pipeline); addPassportHandler(pipeline); addTcpRelatedHandlers(pipeline); pipeline.addLast("ssl", sslHandler); addSslInfoHandlers(pipeline, isSSlFromIntermediary); addSslClientCertChecks(pipeline); addHttp1Handlers(pipeline); addHttpRelatedHandlers(pipeline); addZuulHandlers(pipeline); } }