/**
 * Returns a failed future whose listeners are invoked with this executor's
 * request context pushed, by wrapping the delegate's failed future.
 *
 * @param cause the failure to report through the returned future
 */
@Override
public <V> Future<V> newFailedFuture(Throwable cause) {
    // Delegate first, then decorate so callbacks observe the request context.
    return new RequestContextAwareFuture<>(context(), delegate().newFailedFuture(cause));
}
/**
 * Starts establishing a connection for {@code key} using {@code desiredProtocol},
 * completing {@code promise} (via {@code notifyConnect}) with the outcome.
 * Fails fast on DNS resolution errors and on protocols already known to be
 * unsupported by the remote address.
 */
private void connect(SessionProtocol desiredProtocol, PoolKey key, CompletableFuture<PooledChannel> promise) {
    // Register the in-flight acquisition before doing any work so concurrent
    // callers can observe/await it.
    setPendingAcquisition(desiredProtocol, key, promise);
    final InetSocketAddress remoteAddress;
    try {
        remoteAddress = toRemoteAddress(key);
    } catch (UnknownHostException e) {
        // Name resolution failed — report through the normal notification path
        // so the pending acquisition is cleaned up consistently.
        notifyConnect(desiredProtocol, key, eventLoop.newFailedFuture(e), promise);
        return;
    }
    // Fail immediately if it is sure that the remote address doesn't support the desired protocol.
    if (SessionProtocolNegotiationCache.isUnsupported(remoteAddress, desiredProtocol)) {
        notifyConnect(desiredProtocol, key,
                eventLoop.newFailedFuture(
                        new SessionProtocolNegotiationException(
                                desiredProtocol, "previously failed negotiation")),
                promise);
        return;
    }
    // Create a new connection.
    final Promise<Channel> sessionPromise = eventLoop.newPromise();
    connect(remoteAddress, desiredProtocol, sessionPromise);
    if (sessionPromise.isDone()) {
        // Connection resolved synchronously; notify inline to avoid an extra hop.
        notifyConnect(desiredProtocol, key, sessionPromise, promise);
    } else {
        sessionPromise.addListener((Future<Channel> future) -> {
            notifyConnect(desiredProtocol, key, future, promise);
        });
    }
}
/**
 * Creates a future already failed with the given cause.
 * Pure pass-through to the wrapped executor.
 *
 * @param arg0 the failure cause for the returned future
 */
@Override
public <V> Future<V> newFailedFuture(Throwable arg0) {
    return getDelegate().newFailedFuture(arg0);
}
/**
 * Acquires a client from the proxy pools in round-robin order.
 * Fails with an {@link IOException} when no proxies are currently registered.
 */
@Override
public Future<NedisClient> acquire() {
    // Snapshot the field once so size() and get() operate on the same list.
    final List<PooledObject> snapshot = this.pools;
    if (snapshot.isEmpty()) {
        return poolBuilder.group().next().newFailedFuture(new IOException("Proxy list empty"));
    }
    // Advance the shared cursor with CAS; retry on contention with other threads.
    while (true) {
        final int current = nextIdx.get();
        final int next = current >= snapshot.size() - 1 ? 0 : current + 1;
        if (nextIdx.compareAndSet(current, next)) {
            return snapshot.get(next).pool.acquire();
        }
    }
}
/**
 * Switches the Redis database index. Rejected on pooled connections because
 * the selected DB is per-connection state that would leak to other borrowers.
 */
@Override
public Future<Void> select(int index) {
    if (pool == null) {
        return select0(index);
    }
    return eventLoop().newFailedFuture(new OperationNotSupportedException(
            "'select' is not allowed on a pooled connection"));
}
/**
 * Authenticates the connection. Rejected on pooled connections because
 * authentication is per-connection state that would leak to other borrowers.
 */
@Override
public Future<Void> auth(byte[] password) {
    if (pool == null) {
        return auth0(password);
    }
    return eventLoop().newFailedFuture(new OperationNotSupportedException(
            "'auth' is not allowed on a pooled connection"));
}
/**
 * Closes the connection via the QUIT command. Rejected on pooled connections,
 * which must be returned to the pool rather than closed by a borrower.
 */
@Override
public Future<Void> quit() {
    if (pool == null) {
        return quit0();
    }
    return eventLoop().newFailedFuture(new OperationNotSupportedException(
            "'quit' is not allowed on a pooled connection"));
}
/**
 * Sets the connection name (CLIENT SETNAME). Rejected on pooled connections
 * because the name is per-connection state that would leak to other borrowers.
 */
@Override
public Future<Void> clientSetname(byte[] name) {
    if (pool == null) {
        return clientSetname0(name);
    }
    return eventLoop().newFailedFuture(new OperationNotSupportedException(
            "'client setname' is not allowed on a pooled connection"));
}
/**
 * Sends {@code msg} over the client channel, queueing it while the channel is
 * still connecting or while earlier pending messages have not been drained,
 * and releasing it if the connection already failed.
 *
 * @return a future completed when the write (or queuing) outcome is known
 */
public Future<Void> send(T msg) {
    if (!clientChannel.isDone()) {
        // Channel is still being established: queue for later delivery.
        return addPending(msg);
    }
    if (!clientChannel.isSuccess()) {
        // Connection failed: free the buffer and surface the original cause.
        ReferenceCountUtil.release(msg);
        return executor.newFailedFuture(clientChannel.cause());
    }
    if (!pendingDone) {
        // Earlier queued messages are not flushed yet; preserve ordering.
        return addPending(msg);
    }
    final Promise<Void> written = executor.newPromise();
    clientChannel.getNow().writeAndFlush(msg)
            .addListener(f -> toPromise(f, written));
    return written;
}
@Override public Future<Channel> getConnection(ConnectionParameters connectionParameters, HostAndPort address) { ConnectionKey key = new ConnectionKey(connectionParameters, address); while (true) { synchronized (this) { if (closed) { return group.next().newFailedFuture(new TTransportException("Connection pool is closed")); } Future<Channel> future; try { future = cachedConnections.get(key, () -> createConnection(key)); } catch (ExecutionException e) { throw new RuntimeException(e); } // connection is still opening if (!future.isDone()) { return future; } // check if connection is failed or closed if (future.getNow().isOpen()) { return future; } // remove dead connection from cache cachedConnections.asMap().remove(key, future); } } }
@Override public Future<Channel> getConnection(ConnectionParameters connectionParameters, HostAndPort address) { ConnectionKey key = new ConnectionKey(connectionParameters, address); while (true) { synchronized (this) { if (closed) { return group.next().newFailedFuture(new TTransportException("Connection pool is closed")); } Future<Channel> future; try { future = cachedConnections.get(key, () -> createConnection(key)); } catch (ExecutionException e) { throw new RuntimeException(e); } // connection is still opening if (!future.isDone()) { return future; } // check if connection is failed or closed if (future.getNow().isOpen()) { return future; } // remove dead connection from cache cachedConnections.asMap().remove(key, future); } } }
/**
 * Returns {@code channel} to the pool: healthy channels go back to the
 * available map (stamped with their release time), broken ones are dropped
 * and the live-channel count decremented. Any failure is reported as a
 * failed future rather than thrown.
 */
@Override
public Future<Void> release(final Channel channel) {
    try {
        handler.channelReleased(channel);
        synchronized (sync) {
            // The channel is no longer leased regardless of its health.
            leased.remove(channel.attr(CHANNEL_URI).get(), channel);
            if (isChannelHealthy(channel)) {
                available.put(channel.attr(CHANNEL_URI).get(), channel);
                // Timestamp used elsewhere for idle-eviction — recorded in UTC.
                channel.attr(CHANNEL_AVAILABLE_SINCE).set(ZonedDateTime.now(ZoneOffset.UTC));
                logger.debug("Channel released to pool: " + channel.id());
            } else {
                channelCount.decrementAndGet();
                logger.debug("Channel broken on release, dispose: " + channel.id());
            }
            // NOTE(review): wakes a single waiter blocked on `sync` — presumably an
            // acquire() waiting for capacity; confirm notifyAll() is not needed when
            // multiple acquirers can wait concurrently.
            sync.notify();
        }
    } catch (Exception e) {
        return bootstrap.config().group().next().newFailedFuture(e);
    }
    return bootstrap.config().group().next().newSucceededFuture(null);
}
/**
 * Validates the builder configuration and constructs the round-robin pool,
 * returning its initialization future. Synchronous construction failures are
 * converted into a failed future instead of being thrown.
 */
public Future<RoundRobinNedisClientPool> build() {
    validate();
    try {
        RoundRobinNedisClientPool clientPool =
                new RoundRobinNedisClientPool(curatorClient, closeCurator, zkProxyDir, poolBuilder);
        return clientPool.initFuture();
    } catch (Exception e) {
        return poolBuilder.group().next().newFailedFuture(e);
    }
}
}
/**
 * Opens a new Thrift client connection to {@code address}, configured from
 * {@code connectionParameters}. The returned promise completes when the
 * connect attempt finishes; synchronous setup errors become a failed future
 * wrapped in {@link TTransportException}.
 */
@Override
public Future<Channel> getConnection(ConnectionParameters connectionParameters, HostAndPort address) {
    try {
        // Pipeline initializer built from the caller-supplied parameters.
        ThriftClientInitializer initializer = new ThriftClientInitializer(
                connectionParameters.getTransport(),
                connectionParameters.getProtocol(),
                connectionParameters.getMaxFrameSize(),
                connectionParameters.getRequestTimeout(),
                connectionParameters.getSocksProxy(),
                connectionParameters.getSslContextParameters().map(sslContextFactory::get));
        Bootstrap bootstrap = new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                .option(ALLOCATOR, allocator)
                .option(CONNECT_TIMEOUT_MILLIS,
                        saturatedCast(connectionParameters.getConnectTimeout().toMillis()))
                .handler(initializer);
        Promise<Channel> promise = group.next().newPromise();
        InetSocketAddress remote = new InetSocketAddress(address.getHost(), address.getPort());
        bootstrap.connect(remote)
                .addListener((ChannelFutureListener) future -> notifyConnect(future, promise));
        return promise;
    } catch (Throwable e) {
        // Keep the async contract: report setup failures via the future, not a throw.
        return group.next().newFailedFuture(new TTransportException(e));
    }
}
/**
 * Opens a new Thrift client connection to {@code address}, configured from
 * {@code connectionParameters}. The returned promise completes when the
 * connect attempt finishes; synchronous setup errors become a failed future
 * wrapped in {@link TTransportException}.
 */
@Override
public Future<Channel> getConnection(ConnectionParameters connectionParameters, HostAndPort address) {
    try {
        // Pipeline initializer built from the caller-supplied parameters.
        ThriftClientInitializer initializer = new ThriftClientInitializer(
                connectionParameters.getTransport(),
                connectionParameters.getProtocol(),
                connectionParameters.getMaxFrameSize(),
                connectionParameters.getRequestTimeout(),
                connectionParameters.getSocksProxy(),
                connectionParameters.getSslContextParameters().map(sslContextFactory::get));
        Bootstrap bootstrap = new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                .option(ALLOCATOR, allocator)
                .option(CONNECT_TIMEOUT_MILLIS,
                        saturatedCast(connectionParameters.getConnectTimeout().toMillis()))
                .handler(initializer);
        Promise<Channel> promise = group.next().newPromise();
        InetSocketAddress remote = new InetSocketAddress(address.getHost(), address.getPort());
        bootstrap.connect(remote)
                .addListener((ChannelFutureListener) future -> notifyConnect(future, promise));
        return promise;
    } catch (Throwable e) {
        // Keep the async contract: report setup failures via the future, not a throw.
        return group.next().newFailedFuture(new TTransportException(e));
    }
}
/**
 * Acquires a channel from the endpoint's pool and wraps it in a
 * {@code ClientRequestChannel}. If the acquire completes synchronously the
 * wrap happens inline; otherwise the result is bridged through a new promise.
 */
public <T> Future<ClientRequestChannel<T>> channel(ClientConfig config, ClientRequestChannelInitializer<T> clientRequestChannelHandler, Promise<T> resultPromise) {
    ChannelPool pool = channelPoolMap.get(config.endpoint());
    Future<Channel> acquireFuture = pool.acquire();
    if (!acquireFuture.isDone()) {
        // Acquire still in flight: complete a fresh promise from the listener.
        EventLoop eventLoop = eventLoopGroup.next();
        Promise<ClientRequestChannel<T>> clientPromise = eventLoop.newPromise();
        acquireFuture.addListener(ignored -> {
            if (acquireFuture.isSuccess()) {
                Future<ClientRequestChannel<T>> wrapped = clientRequestChannel(
                        config, clientRequestChannelHandler, pool, acquireFuture.getNow(), resultPromise);
                Promises.toPromise(wrapped, clientPromise);
            } else {
                clientPromise.setFailure(acquireFuture.cause());
            }
        });
        return clientPromise;
    }
    // Fast path: the pool answered synchronously.
    if (acquireFuture.isSuccess()) {
        return clientRequestChannel(config, clientRequestChannelHandler, pool, acquireFuture.getNow(), resultPromise);
    }
    return eventLoopGroup.next().newFailedFuture(acquireFuture.cause());
}
/**
 * Acquires a client from the pool: grows the pool while under
 * {@code maxPooledConns}, otherwise reuses a pooled connection; in exclusive
 * mode it may exceed the cap, while in shared mode excess acquirers wait on a
 * pending promise fulfilled when a connection is returned.
 */
@Override
public Future<NedisClient> acquire() {
    synchronized (pool) {
        if (closed) {
            return group.next().<NedisClient>newFailedFuture(
                    new IllegalStateException("already closed"));
        }
        // Under the cap: always create a fresh connection.
        if (numConns < maxPooledConns) {
            numConns++;
            return newClient().addListener(acquireFutureListener);
        }
        // At the cap but connections are pooled: hand one out
        // (removed from the pool when `exclusive`, per head()'s argument).
        if (!pool.isEmpty()) {
            NedisClient client = pool.head(exclusive);
            return client.eventLoop().newSucceededFuture(client);
        }
        if (exclusive) {
            // Exclusive mode may overshoot maxPooledConns rather than block.
            numConns++;
            return newClient().addListener(acquireFutureListener);
        } else {
            // If connection is shared, then we should not create more connections than
            // maxPooledConns. So here we add a promise to pending queue. The promise will be
            // notified when there are connections in pool.
            Promise<NedisClient> promise = group.next().newPromise();
            pendingAcquireList.add(promise);
            return promise;
        }
    }
}