private void shutdown0() {
  if (channel != null) {
    channel.close();
    channel = null;
  }
}
@Override
protected synchronized void callTimeout(Call call) {
  if (channel != null) {
    channel.pipeline().fireUserEventTriggered(new CallEvent(TIMEOUT, call));
  }
}
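// A minimal sketch (not the actual HBase handler) of how a downstream duplex
// handler could consume the TIMEOUT CallEvent fired above. The pendingCalls
// map and the CallEvent/Call accessors used here are assumptions for
// illustration only.
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof CallEvent) {
    // Forget the timed-out call so a late response from the server is ignored.
    Call timedOut = ((CallEvent) evt).call; // assumed field name
    pendingCalls.remove(timedOut.id);       // hypothetical bookkeeping map
  } else {
    ctx.fireUserEventTriggered(evt);
  }
}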
/**
 * The close method used when an error has occurred. Currently we just call
 * recoverFileLease after shutting down the datanode channels.
 */
@Override
public void recoverAndClose(CancelableProgressable reporter) throws IOException {
  datanodeList.forEach(ch -> ch.close());
  datanodeList.forEach(ch -> ch.closeFuture().awaitUninterruptibly());
  endFileLease(client, fileId);
  fsUtils.recoverFileLease(dfs, new Path(src), conf,
    reporter == null ? new CancelOnClose(client) : reporter);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) {
  allChannels.remove(ctx.channel());
  NettyRpcServer.LOG.trace("Connection {}; caught unexpected downstream exception.",
    ctx.channel().remoteAddress(), e.getCause());
  ctx.channel().close();
}
@Override
public void operationComplete(Future<Boolean> future) throws Exception {
  if (future.isSuccess()) {
    // SASL negotiation succeeded; remove the SASL handlers before normal RPC traffic.
    ChannelPipeline p = ch.pipeline();
    p.remove(SaslChallengeDecoder.class);
    p.remove(NettyHBaseSaslRpcClientHandler.class);
    // send the connection header to server
    ch.write(connectionHeaderWithLength.retainedDuplicate());
    established(ch);
  } else {
    failInit(ch, toIOE(future.cause()));
  }
}
@Override
public void operationComplete(ChannelFuture future) throws Exception {
  Channel ch = future.channel();
  if (!future.isSuccess()) {
    failInit(ch, toIOE(future.cause()));
    rpcClient.failedServers.addToFailedServers(remoteId.address, future.cause());
    return;
  }
  ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
  if (useSasl) {
    saslNegotiate(ch);
  } else {
    // send the connection header to server
    ch.write(connectionHeaderWithLength.retainedDuplicate());
    established(ch);
  }
}
@Override
protected void doRespond(RpcResponse resp) {
  channel.writeAndFlush(resp);
}
@Override
public synchronized void stop() {
  if (!running) {
    return;
  }
  LOG.info("Stopping server on " + this.serverChannel.localAddress());
  if (authTokenSecretMgr != null) {
    authTokenSecretMgr.stop();
    authTokenSecretMgr = null;
  }
  allChannels.close().awaitUninterruptibly();
  serverChannel.close();
  scheduler.stop();
  closed.countDown();
  running = false;
}
@Override
public boolean add(Channel channel) {
  ConcurrentMap<ChannelId, Channel> map =
    channel instanceof ServerChannel ? serverChannels : nonServerChannels;
  boolean added = map.putIfAbsent(channel.id(), channel) == null;
  if (added) {
    channel.closeFuture().addListener(remover);
  }
  if (stayClosed && closed) {
    // First add channel, then check if closed.
    // Seems inefficient at first, but this way a volatile
    // gives us enough synchronization to be thread-safe.
    //
    // If true: Close right away.
    // (Might be closed a second time by ChannelGroup.close(), but this is ok)
    //
    // If false: Channel will definitely be closed by the ChannelGroup.
    // (Because closed=true always happens-before ChannelGroup.close())
    //
    // See https://github.com/netty/netty/issues/4020
    channel.close();
  }
  return added;
}
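// A minimal usage sketch of the stayClosed semantics above, assuming Netty
// 4.1's DefaultChannelGroup(EventExecutor, boolean stayClosed) constructor.
// Once the group is closed, a racing add() still succeeds, but the channel is
// closed immediately, so no connection can outlive shutdown. lateChannel is a
// placeholder for any channel registered after close().
ChannelGroup group = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true /* stayClosed */);
group.close().awaitUninterruptibly();
group.add(lateChannel); // returns true, but lateChannel is closed right away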
private void setupReceiver(int timeoutMs) {
  AckHandler ackHandler = new AckHandler(timeoutMs);
  for (Channel ch : datanodeList) {
    ch.pipeline().addLast(
      new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS),
      new ProtobufVarint32FrameDecoder(),
      new ProtobufDecoder(PipelineAckProto.getDefaultInstance()),
      ackHandler);
    ch.config().setAutoRead(true);
  }
}
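// A sketch of how the AckHandler installed above might react to the
// IdleStateHandler in front of it: a READER_IDLE event means no PipelineAck
// arrived within timeoutMs. Closing the channel here is an assumption for
// illustration; the real handler also fails the pending output.
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == IdleState.READER_IDLE) {
    ctx.close(); // no ack from this datanode in time, give up on it
  } else {
    ctx.fireUserEventTriggered(evt);
  }
}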
@Override
public synchronized InetSocketAddress getListenerAddress() {
  return ((InetSocketAddress) serverChannel.localAddress());
}
public void join() {
  serverChannel.closeFuture().awaitUninterruptibly();
}
private static void registerDone(ChannelFuture future) {
  // Handle any errors that occurred on the local thread while registering. Even though
  // failures can happen after this point, they will be handled by the channel by closing the
  // childChannel.
  if (!future.isSuccess()) {
    Channel childChannel = future.channel();
    if (childChannel.isRegistered()) {
      childChannel.close();
    } else {
      childChannel.unsafe().closeForcibly();
    }
  }
}
// Setup is done: stop reading automatically so the caller controls the read
// flow, then hand the connected channel back through the promise.
ctx.channel().config().setAutoRead(false);
promise.trySuccess(ctx.channel());
private static void requestWriteBlock(Channel channel, Enum<?> storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
  OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
  int protoLen = proto.getSerializedSize();
  ByteBuf buffer = channel.alloc()
    .buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
  buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  buffer.writeByte(Op.WRITE_BLOCK.code);
  proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
  channel.writeAndFlush(buffer);
}
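// A read-side sketch matching the frame written above: a 2-byte transfer
// version, a 1-byte op code, then a varint32 length-delimited
// OpWriteBlockProto. This helper is illustrative only; a real peer would
// decode the stream through proper frame decoders.
private static OpWriteBlockProto readWriteBlockRequest(ByteBuf buf) throws IOException {
  short version = buf.readShort(); // expected: DataTransferProtocol.DATA_TRANSFER_VERSION
  byte op = buf.readByte();        // expected: Op.WRITE_BLOCK.code
  return OpWriteBlockProto.parseDelimitedFrom(new ByteBufInputStream(buf));
}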
@Override
public boolean remove(Object o) {
  Channel c = null;
  if (o instanceof ChannelId) {
    c = nonServerChannels.remove(o);
    if (c == null) {
      c = serverChannels.remove(o);
    }
  } else if (o instanceof Channel) {
    c = (Channel) o;
    if (c instanceof ServerChannel) {
      c = serverChannels.remove(c.id());
    } else {
      c = nonServerChannels.remove(c.id());
    }
  }
  if (c == null) {
    return false;
  }
  c.closeFuture().removeListener(remover);
  return true;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
  final ChannelConfig config = ctx.channel().config();
  if (config.isAutoRead()) {
    // stop accept new connections for 1 second to allow the channel to recover
    // See https://github.com/netty/netty/issues/1328
    config.setAutoRead(false);
    ctx.channel().eventLoop().schedule(enableAutoReadTask, 1, TimeUnit.SECONDS);
  }
  // still let the exceptionCaught event flow through the pipeline to give the user
  // a chance to do something with it
  ctx.fireExceptionCaught(cause);
}
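// A sketch of the enableAutoReadTask scheduled above; this mirrors the field
// Netty's ServerBootstrapAcceptor declares for exactly this purpose: turn
// reads back on once the one-second pause has elapsed.
private final Runnable enableAutoReadTask = new Runnable() {
  @Override
  public void run() {
    channel.config().setAutoRead(true);
  }
};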