protected final boolean isAuthenticated() {
    return (authEvent != null && authEvent.isSuccess());
}

public ChannelFuture sendPing() {
    return pushProtocol.sendPing(ctx);
}

public ChannelFuture sendPushMessage(ByteBuf mesg) {
    return pushProtocol.sendPushMessage(ctx, mesg);
}

@Override
protected void initChannel(Channel ch) throws Exception {
    final ChannelPipeline pipeline = ch.pipeline();
    storeChannel(ch);
    addTcpRelatedHandlers(pipeline);
    addHttp1Handlers(pipeline);
    addPushHandlers(pipeline);
}

/**
 * Register the authenticated client - represented by the PushUserAuth event - with the
 * PushConnectionRegistry of this instance.
 *
 * For all but the most simplistic deployments - basically anything other than a single-node
 * push cluster - you'd most likely need some sort of off-box, partitioned, global registration
 * registry that keeps track of which client is connected to which push server instance. For
 * such cases you should override this default implementation and register your client with
 * your global registry in addition to the local push connection registry, which is limited to
 * this JVM instance. Make sure such a registration is done in a strictly non-blocking fashion,
 * lest you block the Netty event loop and decimate your throughput.
 *
 * A typical arrangement is to use something like a Memcached or Redis cluster sharded by
 * client connection key, and to run the blocking Memcached/Redis driver in a background
 * thread pool to do the actual registration, so that the Netty event loop doesn't block.
 * A sketch of such an override follows below.
 */
protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                              PushConnection conn, PushConnectionRegistry registry) {
    registry.put(authEvent.getClientIdentity(), conn);
    // Make the client reconnect after ttl seconds by closing this connection,
    // to limit the stickiness of the client to this server instance.
    ctx.executor().schedule(this::requestClientToCloseConnection,
            ditheredReconnectDeadline(), TimeUnit.SECONDS);
}

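A minimal sketch of such an override, assuming a hypothetical GlobalRegistrationClient (e.g. a
thin wrapper over a blocking Redis or Memcached driver with a register(clientId, address)
method) and a caller-supplied background pool. GlobalRegistrationClient, registrationPool, and
serverAddress are illustrative names, not part of the framework; imports are elided:

public class GloballyRegisteringHandler extends PushRegistrationHandler {

    private final GlobalRegistrationClient globalRegistry; // hypothetical off-box registry client
    private final ExecutorService registrationPool;        // runs blocking driver calls off the event loop
    private final String serverAddress;                    // how senders reach this push server instance

    public GloballyRegisteringHandler(PushConnectionRegistry registry, PushProtocol protocol,
                                      GlobalRegistrationClient globalRegistry,
                                      ExecutorService registrationPool, String serverAddress) {
        super(registry, protocol);
        this.globalRegistry = globalRegistry;
        this.registrationPool = registrationPool;
        this.serverAddress = serverAddress;
    }

    @Override
    protected void registerClient(ChannelHandlerContext ctx, PushUserAuth authEvent,
                                  PushConnection conn, PushConnectionRegistry registry) {
        // Keep the default local (in-JVM) registration and the dithered reconnect schedule.
        super.registerClient(ctx, authEvent, conn, registry);
        final String clientId = authEvent.getClientIdentity();
        // The blocking off-box write runs on the background pool, never on the Netty event loop.
        registrationPool.submit(() -> globalRegistry.register(clientId, serverAddress));
    }
}
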
@Override
protected void addPushHandlers(final ChannelPipeline pipeline) {
    pipeline.addLast(PushAuthHandler.NAME, pushAuthHandler);
    pipeline.addLast(new WebSocketServerCompressionHandler());
    pipeline.addLast(new WebSocketServerProtocolHandler(PushProtocol.WEBSOCKET.getPath(), null, true));
    pipeline.addLast(new PushRegistrationHandler(pushConnectionRegistry, PushProtocol.WEBSOCKET));
    pipeline.addLast(new SampleWebSocketPushClientProtocolHandler());
}

private void tearDown() {
    // getAndSet makes the teardown idempotent even if it is triggered more than once.
    if (!destroyed.getAndSet(true)) {
        if (authEvent != null) {
            pushConnectionRegistry.remove(authEvent.getClientIdentity());
            logger.debug("Closing connection for {}", authEvent);
        }
    }
}

public void put(final String clientId, final PushConnection pushConnection) {
    // Mint a fresh secure token for this connection before registering it.
    pushConnection.setSecureToken(mintNewSecureToken());
    clientPushConnectionMap.put(clientId, pushConnection);
}

public ChannelFuture sendPushMessage(String mesg) {
    return sendPushMessage(Unpooled.copiedBuffer(mesg, Charsets.UTF_8));
}
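For completeness, a minimal sketch of the send path, assuming PushConnectionRegistry exposes a
get(clientId) lookup as the counterpart of the put() shown above; pushToClient is an
illustrative name and Netty imports are elided:

// Look up the client's live connection in this JVM and push a text message to it.
void pushToClient(PushConnectionRegistry registry, String clientId, String message) {
    final PushConnection conn = registry.get(clientId);
    if (conn == null) {
        // Client isn't connected to this instance; with an off-box global registry you'd
        // look up which push server holds the connection and forward the request there.
        return;
    }
    conn.sendPushMessage(message).addListener((ChannelFutureListener) future -> {
        if (!future.isSuccess()) {
            // The write failed (e.g. the connection closed mid-send); log and/or retry.
        }
    });
}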