/**
 * Resolves the given hostname to an {@link InetAddress} via the Netty resolver group
 * and delivers the outcome back on the caller's Vert.x context.
 *
 * @param hostname      the host name to resolve
 * @param resultHandler invoked on the caller's context with the resolved address,
 *                      or with the resolution failure cause
 */
public void resolveHostname(String hostname, Handler<AsyncResult<InetAddress>> resultHandler) {
  ContextInternal callerContext = (ContextInternal) vertx.getOrCreateContext();
  io.netty.resolver.AddressResolver<InetSocketAddress> addressResolver =
      resolverGroup.getResolver(callerContext.nettyEventLoop());
  // Port 0 is a placeholder: only the host part is being resolved.
  io.netty.util.concurrent.Future<InetSocketAddress> resolution =
      addressResolver.resolve(InetSocketAddress.createUnresolved(hostname, 0));
  resolution.addListener(done -> callerContext.runOnContext(v -> {
    if (done.isSuccess()) {
      InetSocketAddress resolved = resolution.getNow();
      resultHandler.handle(Future.succeededFuture(resolved.getAddress()));
    } else {
      resultHandler.handle(Future.failedFuture(done.cause()));
    }
  }));
}
public synchronized void removeHandler(T handler, ContextInternal context) { EventLoop worker = context.nettyEventLoop(); Handlers<T> handlers = handlerMap.get(worker); if (!handlers.removeHandler(new HandlerHolder<>(context, handler))) { throw new IllegalStateException("Can't find handler"); } if (handlers.isEmpty()) { handlerMap.remove(worker); } if (handlerMap.isEmpty()) { hasHandlers = false; } //Available workers does it's own reference counting -since workers can be shared across different Handlers availableWorkers.removeWorker(worker); }
/**
 * Registers a handler keyed by the given context's event loop and marks the
 * registry as non-empty.
 *
 * @param handler the handler to register
 * @param context the context whose Netty event loop keys the handler map
 */
public synchronized void addHandler(T handler, ContextInternal context) {
  EventLoop eventLoop = context.nettyEventLoop();
  // Workers are reference-counted because they can be shared across different Handlers.
  availableWorkers.addWorker(eventLoop);
  Handlers<T> handlers = handlerMap.computeIfAbsent(eventLoop, el -> new Handlers<>());
  handlers.addHandler(new HandlerHolder<>(context, handler));
  hasHandlers = true;
}
public void connect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, Handler<AsyncResult<Channel>> channelHandler) { Handler<AsyncResult<Channel>> handler = res -> { if (Context.isOnEventLoopThread()) { channelHandler.handle(res); } else { // We are on the GlobalEventExecutor context.nettyEventLoop().execute(() -> channelHandler.handle(res)); } }; if (proxyOptions != null) { handleProxyConnect(remoteAddress, peerAddress, serverName, handler); } else { handleConnect(remoteAddress, peerAddress, serverName, handler); } }
/**
 * Check whether the pool can make progress toward satisfying the waiters.
 * At most one pending check is scheduled on the event loop at any time.
 */
private void checkProgress() {
  if (checkInProgress) {
    return; // a check is already scheduled
  }
  if (canProgress() || canClose()) {
    checkInProgress = true;
    context.nettyEventLoop().execute(this::checkPendingTasks);
  }
}
/**
 * Starts an asynchronous fake connect: transitions DISCONNECTED -> CONNECTING
 * synchronously, then CONNECTING -> CONNECTED on the context's event loop.
 *
 * @return this connection, for fluent use
 * @throws IllegalStateException if the connection is not currently disconnected
 */
synchronized FakeConnection connect() {
  if (status != DISCONNECTED) {
    throw new IllegalStateException();
  }
  status = CONNECTING;
  Runnable completeConnect = () -> {
    // Runs later on the event-loop thread, so the lock must be re-acquired.
    synchronized (FakeConnection.this) {
      status = CONNECTED;
      future.complete(new ConnectResult<>(this, concurrency, 1));
    }
  };
  context.nettyEventLoop().execute(completeConnect);
  return this;
}
/**
 * Runs a DNS query for the given record types, making sure the query executes on
 * the context's event loop.
 *
 * @param name    the name to look up; must not be null
 * @param handler receives the result list or the failure
 * @param types   the DNS record types to query
 */
@SuppressWarnings("unchecked")
private <T> void lookupList(String name, Handler<AsyncResult<List<T>>> handler, DnsRecordType... types) {
  Objects.requireNonNull(name, "no null name accepted");
  Runnable task = () -> new Query(name, types, handler).run();
  EventLoop eventLoop = actualCtx.nettyEventLoop();
  if (eventLoop.inEventLoop()) {
    // Already on the event loop: run inline to avoid an extra dispatch.
    task.run();
  } else {
    eventLoop.execute(task);
  }
}
/**
 * Fails the pending future on the context's event loop; tryFail is a no-op if the
 * future already completed.
 */
void fail(Throwable err) {
  context.nettyEventLoop().execute(() -> future.tryFail(err));
}
}
// NOTE(review): fragment of a channel-setup routine; the enclosing method is not visible here.
bufAllocator.maxMessagesPerRead(1); // read at most one message per read-loop iteration
channel.config().setAllocator(PartialPooledByteBufAllocator.INSTANCE);
// Register the channel with the context's Netty event loop.
actualCtx.nettyEventLoop().register(channel);
if (options.getLogActivity()) {
  // Network activity logging was requested in the options.
  channel.pipeline().addLast("logging", new LoggingHandler());
@Test
public void testWorkerExecuteFromIo() throws Exception {
  // Capture the worker verticle's context once it has started.
  AtomicReference<ContextInternal> workerContext = new AtomicReference<>();
  CountDownLatch latch = new CountDownLatch(1);
  vertx.deployVerticle(new AbstractVerticle() {
    @Override
    public void start() throws Exception {
      workerContext.set((ContextInternal) context);
      latch.countDown();
    }
  }, new DeploymentOptions().setWorker(true));
  awaitLatch(latch);
  // Hop onto the worker context's event loop: no Vert.x context is associated there.
  workerContext.get().nettyEventLoop().execute(() -> {
    assertNull(Vertx.currentContext());
    workerContext.get().nettyEventLoop().execute(() -> {
      // executeFromIO must dispatch on the worker context using a worker thread.
      workerContext.get().executeFromIO(v -> {
        assertSame(workerContext.get(), Vertx.currentContext());
        assertTrue(Context.isOnWorkerThread());
        testComplete();
      });
    });
  });
  await();
}
// NOTE(review): fragment of a client connect routine; the enclosing method is not visible here.
sslHelper.validate(vertx);
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(context.nettyEventLoop());
// A non-null path means a domain socket address; pick the matching channel factory.
bootstrap.channelFactory(vertx.transport().channelFactory(remoteAddress.path() != null));
// NOTE(review): fragment — bootstrap wired to the context's event loop; enclosing method not visible.
bootstrap.group(context.nettyEventLoop());
// false presumably selects a TCP (non domain-socket) channel factory — TODO confirm against Transport.
bootstrap.channelFactory(client.getVertx().transport().channelFactory(false));
/**
 * Creates a datagram socket: builds and configures a Netty datagram channel for the
 * requested IP family, registers it on a Vert.x context's event loop, and sets up
 * optional activity logging and metrics.
 *
 * @param vertx   the Vert.x instance providing transport, context and metrics
 * @param options the socket options; a defensive copy is used for configuration
 */
private DatagramSocketImpl(VertxInternal vertx, DatagramSocketOptions options) {
  Transport transport = vertx.transport();
  InternetProtocolFamily family = options.isIpV6() ? InternetProtocolFamily.IPv6 : InternetProtocolFamily.IPv4;
  DatagramChannel datagramChannel = transport.datagramChannel(family);
  // Configure against a copy so later mutation of the caller's options has no effect.
  transport.configure(datagramChannel, new DatagramSocketOptions(options));
  ContextInternal ctx = vertx.getOrCreateContext();
  datagramChannel.config().setOption(ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION, true);
  MaxMessagesRecvByteBufAllocator recvAllocator = datagramChannel.config().getRecvByteBufAllocator();
  recvAllocator.maxMessagesPerRead(1); // one datagram per read-loop iteration
  ctx.nettyEventLoop().register(datagramChannel);
  if (options.getLogActivity()) {
    datagramChannel.pipeline().addLast("logging", new LoggingHandler());
  }
  VertxMetrics metricsSPI = vertx.metricsSPI();
  this.metrics = metricsSPI != null ? metricsSPI.createDatagramSocketMetrics(options) : null;
  this.channel = datagramChannel;
  this.context = ctx;
  this.demand = Long.MAX_VALUE;
}
awaitLatch(latch);
// NOTE(review): test fragment setting up a raw Netty server on the context's event loop;
// the enclosing test method is not visible here.
ServerBootstrap bs = new ServerBootstrap();
bs.group(context.nettyEventLoop());
bs.channelFactory(((VertxInternal)vertx).transport().serverChannelFactory(false));
bs.option(ChannelOption.SO_BACKLOG, 100);
@Test
public void testEventLoopExecuteFromIo() throws Exception {
  ContextInternal eventLoopContext = (ContextInternal) vertx.getOrCreateContext();
  // Check from other thread: executeFromIO must reject non-event-loop callers.
  try {
    eventLoopContext.executeFromIO(v -> fail());
    fail();
  } catch (IllegalStateException expected) {
  }
  // Check from event loop thread
  eventLoopContext.nettyEventLoop().execute(() -> {
    // Should not be set yet
    assertNull(Vertx.currentContext());
    Thread vertxThread = Thread.currentThread();
    AtomicBoolean nested = new AtomicBoolean(true);
    // executeFromIO must run the task synchronously, on the same thread, with the
    // context association in place.
    eventLoopContext.executeFromIO(v -> {
      assertTrue(nested.get());
      assertSame(eventLoopContext, Vertx.currentContext());
      assertSame(vertxThread, Thread.currentThread());
    });
    nested.set(false);
    testComplete();
  });
  await();
}
/**
 * Registers a handler keyed by the given context's event loop and marks the
 * registry as non-empty.
 *
 * @param handler the handler to register
 * @param context the context whose Netty event loop keys the handler map
 */
public synchronized void addHandler(T handler, ContextInternal context) {
  EventLoop worker = context.nettyEventLoop();
  // Workers are reference-counted because they can be shared across different Handlers.
  availableWorkers.addWorker(worker);
  Handlers<T> handlers = new Handlers<>();
  // Keep the previously-registered Handlers instance if one already exists for this loop.
  Handlers<T> prev = handlerMap.putIfAbsent(worker, handlers);
  if (prev != null) {
    handlers = prev;
  }
  handlers.addHandler(new HandlerHolder<>(context, handler));
  hasHandlers = true;
}
/**
 * Check whether the pool can make progress toward satisfying the waiters.
 * At most one pending check is scheduled at a time (guarded by checkInProgress);
 * presumably checkPendingTasks resets the flag when it runs — TODO confirm.
 */
private void checkProgress() {
  if (!checkInProgress && (canProgress() || canClose())) {
    checkInProgress = true;
    context.nettyEventLoop().execute(this::checkPendingTasks);
  }
}
/**
 * Starts an asynchronous fake connect: transitions DISCONNECTED -> CONNECTING
 * synchronously, then CONNECTING -> CONNECTED on the context's event loop.
 *
 * @return this connection, for fluent use
 * @throws IllegalStateException if the connection is not currently disconnected
 */
synchronized FakeConnection connect() {
  if (status != DISCONNECTED) {
    throw new IllegalStateException();
  }
  status = CONNECTING;
  context.nettyEventLoop().execute(() -> {
    // Runs later on the event-loop thread, so the lock must be re-acquired.
    synchronized (FakeConnection.this) {
      status = CONNECTED;
      future.complete(new ConnectResult<>(this, concurrency, 1));
    }
  });
  return this;
}
/**
 * Runs a DNS query for the given record types, making sure the query executes on
 * the context's event loop.
 *
 * @param name    the name to look up; must not be null
 * @param handler receives the result list or the failure
 * @param types   the DNS record types to query
 */
@SuppressWarnings("unchecked")
private <T> void lookupList(String name, Handler<AsyncResult<List<T>>> handler, DnsRecordType... types) {
  Objects.requireNonNull(name, "no null name accepted");
  EventLoop el = actualCtx.nettyEventLoop();
  if (el.inEventLoop()) {
    // Already on the event loop: run inline to avoid an extra dispatch.
    new Query(name, types, handler).run();
  } else {
    el.execute(() -> {
      new Query(name, types, handler).run();
    });
  }
}
/**
 * Fails the pending future on the context's event loop; tryFail is a no-op if the
 * future already completed.
 */
void fail(Throwable err) {
  context.nettyEventLoop().execute(() -> future.tryFail(err));
}
}