/**
 * Wires every datanode channel with the ack-decoding pipeline and enables reads.
 *
 * <p>Each channel gets an idle-state watchdog (read-idle {@code timeoutMs} ms,
 * write-idle {@code timeoutMs / 2} ms), a varint32 length-frame decoder, a
 * protobuf decoder for {@code PipelineAckProto}, and the shared ack handler.
 *
 * @param timeoutMs read-idle timeout in milliseconds; also passed to the ack handler
 */
private void setupReceiver(int timeoutMs) {
  AckHandler ackHandler = new AckHandler(timeoutMs);
  for (Channel ch : datanodeList) {
    ch.pipeline()
        .addLast(new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS))
        .addLast(new ProtobufVarint32FrameDecoder())
        .addLast(new ProtobufDecoder(PipelineAckProto.getDefaultInstance()))
        .addLast(ackHandler);
    // Enable reads only after the full decode chain is installed.
    ch.config().setAutoRead(true);
  }
}
@Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { final ChannelConfig config = ctx.channel().config(); if (config.isAutoRead()) { // stop accept new connections for 1 second to allow the channel to recover // See https://github.com/netty/netty/issues/1328 config.setAutoRead(false); ctx.channel().eventLoop().schedule(enableAutoReadTask, 1, TimeUnit.SECONDS); } // still let the exceptionCaught event flow through the pipeline to give the user // a chance to do something with it ctx.fireExceptionCaught(cause); } }
/**
 * Creates an unconnected local (in-VM) channel with no parent.
 *
 * <p>The configured allocator is wrapped in a {@code PreferHeapByteBufAllocator}
 * so heap buffers are preferred — presumably because the in-VM transport gains
 * nothing from direct buffers; TODO confirm against the transport's write path.
 */
public LocalChannel() {
  super(null);
  config().setAllocator(new PreferHeapByteBufAllocator(config.getAllocator()));
}
// Snapshot the channel's config, pipeline and allocator state for this read pass.
final ChannelConfig config = config();
final ChannelPipeline pipeline = pipeline();
final ByteBufAllocator allocator = config.getAllocator();
final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle();
// Reset the allocator handle's per-pass bookkeeping before reading.
allocHandle.reset(config);
// No outstanding read request and autoRead is off: stop watching for read readiness.
if (!readPending && !config.isAutoRead()) {
  removeReadOp();
/** Returns the allocator supplied by this channel's configuration. */
@Override
public ByteBufAllocator alloc() {
  ByteBufAllocator configured = config().getAllocator();
  return configured;
}
@Override public long bytesBeforeUnwritable() { // TODO: Do a proper impl return config().getWriteBufferHighWaterMark(); }
/**
 * Returns the write-buffer low water mark: the parent channel's value, capped
 * by the initial outbound stream window.
 */
@Override
public int getWriteBufferLowWaterMark() {
  int parentLowWaterMark = parent().config().getWriteBufferLowWaterMark();
  return Math.min(parentLowWaterMark, initialOutboundStreamWindow);
}
/**
 * Resolves the executor to fire events on for the given group, or {@code null}
 * when no group is supplied.
 *
 * <p>Unless {@code SINGLE_EVENTEXECUTOR_PER_GROUP} is explicitly disabled, one
 * executor is pinned per group and remembered so that events for the same
 * channel always fire on the same executor.
 */
private EventExecutor childExecutor(EventExecutorGroup group) {
  if (group == null) {
    return null;
  }
  Boolean pin = channel.config().getOption(ChannelOption.SINGLE_EVENTEXECUTOR_PER_GROUP);
  if (pin != null && !pin) {
    // Pinning explicitly disabled: hand out the group's next executor each time.
    return group.next();
  }
  Map<EventExecutorGroup, EventExecutor> executors = this.childExecutors;
  if (executors == null) {
    // Sized at 4: most pipelines register at most one extra EventExecutorGroup.
    executors = this.childExecutors = new IdentityHashMap<EventExecutorGroup, EventExecutor>(4);
  }
  EventExecutor pinned = executors.get(group);
  if (pinned == null) {
    pinned = group.next();
    executors.put(group, pinned);
  }
  return pinned;
}
@Override
/**
 * Returns the message-size estimator handle, creating it lazily on first use.
 * Racing initializers are resolved by a CAS: the loser discards its freshly
 * created handle and adopts the one the winner published.
 */
final MessageSizeEstimator.Handle estimatorHandle() {
  MessageSizeEstimator.Handle h = estimatorHandle;
  if (h == null) {
    h = channel.config().getMessageSizeEstimator().newHandle();
    if (!ESTIMATOR.compareAndSet(this, null, h)) {
      // Another thread won the race; re-read the published handle.
      h = estimatorHandle;
    }
  }
  return h;
}
/**
 * Returns the receive-buffer allocator handle, creating it lazily on first call.
 * The cast reflects the contract that the configured allocator produces
 * {@code RecvByteBufAllocator.ExtendedHandle} instances.
 */
@Override
public RecvByteBufAllocator.ExtendedHandle recvBufAllocHandle() {
  if (recvHandle == null) {
    RecvByteBufAllocator.Handle created = config().getRecvByteBufAllocator().newHandle();
    recvHandle = (RecvByteBufAllocator.ExtendedHandle) created;
  }
  return recvHandle;
}
// Retry writing the current message up to the configured spin count before
// giving up for this flush pass.
for (int i = config().getWriteSpinCount() - 1; i >= 0; i--) {
  if (doWriteMessage(msg, in)) {
    done = true;
// Flush the outbound buffer to the transport.
doWrite(outboundBuffer);
} catch (Throwable t) {
  // An I/O failure closes the channel when auto-close is enabled.
  if (t instanceof IOException && config().isAutoClose()) {
// Schedule a task that fails the connect attempt once the configured timeout
// elapses; a value <= 0 means no connect timeout is enforced.
int connectTimeoutMillis = config().getConnectTimeoutMillis();
if (connectTimeoutMillis > 0) {
  connectTimeoutFuture = eventLoop().schedule(new Runnable() {
// Scheduled task that re-enables reads after a traffic suspension.
@Override
public void run() {
  ChannelConfig config = ctx.channel().config();
  if (!config.isAutoRead() && isHandlerActive(ctx)) {
    logger.debug("Not unsuspend: " + config.isAutoRead() + ':' + isHandlerActive(ctx));
    // NOTE(review): the inner condition below can never be true — the enclosing
    // branch already established !config.isAutoRead() && isHandlerActive(ctx),
    // so only the else branch is reachable here. Verify against upstream intent.
    if (config.isAutoRead() && !isHandlerActive(ctx)) {
      logger.debug("Unsuspend: " + config.isAutoRead() + ':' + isHandlerActive(ctx));
    } else {
      logger.debug("Normal unsuspend: " + config.isAutoRead() + ':' + isHandlerActive(ctx));
      // Resume auto-reading and kick off a read immediately.
      config.setAutoRead(true);
      ctx.channel().read();
      logger.debug("Unsuspend final status => " + config.isAutoRead() + ':' + isHandlerActive(ctx));
/**
 * Creates a server channel for the local (in-VM) transport.
 *
 * <p>The configured allocator is wrapped in a {@code PreferHeapByteBufAllocator}
 * so heap buffers are preferred — presumably because the in-VM transport gains
 * nothing from direct buffers; TODO confirm against the transport's write path.
 */
public LocalServerChannel() {
  config().setAllocator(new PreferHeapByteBufAllocator(config.getAllocator()));
}
final ByteBufAllocator allocator = config.getAllocator();
final RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle();
// Reset the handle's per-read-loop counters before this pass.
allocHandle.reset(config);
handleReadException(pipeline, byteBuf, t, close, allocHandle);
} finally {
  // Continue reading when a read is pending, autoRead is enabled, or no data was
  // read while the channel is still active. Note operator precedence: the last
  // clause groups as (!readData && isActive()), not ((... || !readData) && isActive()).
  if (readPending || config.isAutoRead() || !readData && isActive()) {
/** Returns the allocator taken from this channel's configuration. */
@Override
public ByteBufAllocator alloc() {
  ByteBufAllocator fromConfig = config().getAllocator();
  return fromConfig;
}
/**
 * Returns the write-buffer high water mark: the parent channel's value, capped
 * by the initial outbound stream window.
 */
@Override
public int getWriteBufferHighWaterMark() {
  int parentHighWaterMark = parent().config().getWriteBufferHighWaterMark();
  return Math.min(parentHighWaterMark, initialOutboundStreamWindow);
}
private int minUsableChannelBytes() { // The current allocation algorithm values "fairness" and doesn't give any consideration to "goodput". It // is possible that 1 byte will be allocated to many streams. In an effort to try to make "goodput" // reasonable with the current allocation algorithm we have this "cheap" check up front to ensure there is // an "adequate" amount of connection window before allocation is attempted. This is not foolproof as if the // number of streams is >= this minimal number then we may still have the issue, but the idea is to narrow the // circumstances in which this can happen without rewriting the allocation algorithm. return max(ctx.channel().config().getWriteBufferLowWaterMark(), MIN_WRITABLE_CHUNK); }
/**
 * Reacts to EOF observed while reading: either half-closes the input side
 * (when {@code ALLOW_HALF_CLOSURE} is enabled), fully closes the channel, or —
 * if the input was already shut down — signals that the remote side is done.
 */
private void closeOnRead(ChannelPipeline pipeline) {
  if (isInputShutdown0()) {
    // Input already shut down earlier: a further EOF means the peer finished entirely.
    pipeline.fireUserEventTriggered(ChannelInputShutdownReadComplete.INSTANCE);
    return;
  }
  Boolean allowHalfClosure = config().getOption(ChannelOption.ALLOW_HALF_CLOSURE);
  if (Boolean.TRUE.equals(allowHalfClosure)) {
    // Half-closure permitted: shut down only the input side and notify handlers.
    shutdownInput();
    pipeline.fireUserEventTriggered(ChannelInputShutdownEvent.INSTANCE);
  } else {
    close(voidPromise());
  }
}