/**
 * Arms the idle-channel detector to run once after {@code cleanerPeriod} milliseconds.
 * The task itself is expected to re-schedule via this method if it wants to repeat.
 *
 * @param task the detection task to schedule on the shared Netty timer
 */
private void scheduleNewIdleChannelDetector(TimerTask task) {
    nettyTimer.newTimeout(task, cleanerPeriod, TimeUnit.MILLISECONDS);
}
/**
 * Stops and discards the shared singleton, if one exists.
 * Subsequent calls are harmless no-ops.
 *
 * <p>NOTE(review): the check-then-act on {@code instance} is not atomic;
 * presumably callers serialize release externally — confirm if this can race.
 */
public static void release() {
    if (instance == null) {
        instance = null;
        return;
    }
    instance.stop();
    instance = null;
}
/**
 * Starts a self-rescheduling heartbeat on the shared wheel timer.
 * Each tick runs {@code healthCheck()}; the task re-arms itself only while the
 * connection is still connected AND the health check passes, so the heartbeat
 * stops naturally once either condition fails.
 *
 * @param heartbeat interval between ticks, in milliseconds
 */
private void startHeartBeat(final int heartbeat) throws Exception {
    TimerTask heartBeatTask = new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            if (connection.isConnected() && healthCheck()) {
                // Re-arm for the next tick; no re-arm means the heartbeat ends here.
                HASHED_WHEEL_TIMER.newTimeout(this, heartbeat, TimeUnit.MILLISECONDS);
            }
        }
    };
    HASHED_WHEEL_TIMER.newTimeout(heartBeatTask, heartbeat, TimeUnit.MILLISECONDS);
}
/**
 * Closes this instance exactly once (idempotent via the {@code closed} CAS).
 * The channel manager is always closed; the Netty timer is stopped only when
 * this instance owns it ({@code allowStopNettyTimer}). Failures in either step
 * are logged and swallowed so close never throws.
 */
@Override
public void close() {
    if (!closed.compareAndSet(false, true)) {
        return; // someone else already closed us
    }
    try {
        channelManager.close();
    } catch (Throwable t) {
        LOGGER.warn("Unexpected error on ChannelManager close", t);
    }
    if (!allowStopNettyTimer) {
        return; // timer is shared/externally owned — leave it running
    }
    try {
        nettyTimer.stop();
    } catch (Throwable t) {
        LOGGER.warn("Unexpected error on HashedWheelTimer close", t);
    }
}
/**
 * Schedules {@code task} on the shared Netty timer after {@code delay} ms.
 *
 * @return the scheduled {@link Timeout}, or {@code null} when the request
 *         sender has already been closed and nothing is scheduled
 */
private Timeout newTimeout(TimerTask task, long delay) {
    if (requestSender.isClosed()) {
        return null;
    }
    return nettyTimer.newTimeout(task, delay, TimeUnit.MILLISECONDS);
} }
/**
 * Shuts this component down: closes the pool, gracefully stops the worker
 * event-loop group, stops the timer, then reports success to the listener.
 * Statement order is preserved deliberately — each step releases resources
 * the previous one may still have been using.
 */
@Override
protected void doStop(Listener listener) throws Throwable {
    pool.close();
    workerGroup.shutdownGracefully();
    timer.stop();
    listener.onSuccess();
} }
// NOTE(review): fragment — this is the tail of a newTimeout(...) call whose opening is
// outside this view. The anonymous task re-arms itself every `heartbeat` ms for as long
// as the connection reports connected and healthCheck() returns true.
@Override public void run(Timeout timeout) throws Exception { if (connection.isConnected() && healthCheck()) { HASHED_WHEEL_TIMER.newTimeout(this, heartbeat, TimeUnit.MILLISECONDS); } } }, heartbeat, TimeUnit.MILLISECONDS);
/**
 * Shuts down the Pulsar client: lookup service, connection pool, timer,
 * external executor, and the configured authentication plugin — in that order.
 * Any failure is logged and re-thrown wrapped as a {@link PulsarClientException}.
 *
 * @throws PulsarClientException if any component fails to close
 */
@Override
public void shutdown() throws PulsarClientException {
    try {
        lookup.close();
        cnxPool.close();
        timer.stop();
        externalExecutorProvider.shutdownNow();
        conf.getAuthentication().close();
    } catch (Throwable t) {
        log.warn("Failed to shutdown Pulsar client", t);
        throw new PulsarClientException(t);
    }
}
// NOTE(review): fragment — opening of a newTimeout(...) call whose task body and delay
// arguments continue outside this view; the resulting handle is kept in `timeout`.
timeout = client.timer().newTimeout(new TimerTask() { @Override public void run(Timeout t) throws Exception {
// NOTE(review): fragment — stops the timer and logs the shutdown; enclosing method not visible here.
_timer.stop(); LOGGER.info("Timer shut down !!");
// NOTE(review): fragment — tail of an enclosing client.timer().newTimeout(...) call. On each
// tick, under the write lock, a fresh partition is appended and the oldest one removed; any
// message IDs found in the evicted head partition have timed out and are collected (and purged
// from messageIdPartitionMap). Redelivery is requested OUTSIDE the lock, and the task re-arms
// itself for the next tick. NOTE(review): the warn message logs timePartitions.size(), not the
// number of timed-out messages — looks like it may report the wrong count; confirm upstream.
@Override public void run(Timeout t) throws Exception { Set<MessageId> messageIds = new HashSet<>(); writeLock.lock(); try { timePartitions.addLast(new ConcurrentOpenHashSet<>()); ConcurrentOpenHashSet<MessageId> headPartition = timePartitions.removeFirst(); if (!headPartition.isEmpty()) { log.warn("[{}] {} messages have timed-out", consumerBase, timePartitions.size()); headPartition.forEach(messageId -> { messageIds.add(messageId); messageIdPartitionMap.remove(messageId); }); } } finally { writeLock.unlock(); } if (messageIds.size() > 0) { consumerBase.redeliverUnacknowledgedMessages(messageIds); } timeout = client.timer().newTimeout(this, tickDurationInMs, TimeUnit.MILLISECONDS); } }, this.tickDurationInMs, TimeUnit.MILLISECONDS);
/**
 * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}.
 *
 * <p>This is guaranteed to be called only after all connections have been individually closed,
 * and their channels closed, and only once per {@link Timer} instance.
 *
 * <p>This gives the implementor a chance to close the {@link Timer} properly, if required.
 *
 * <p>The default implementation calls {@link Timer#stop()} on the passed {@link Timer}
 * instance.
 *
 * <p>Implementation note: if the {@link Timer} instance is being shared, or used for other
 * purposes than to schedule actions for the current cluster, then it should not be stopped here;
 * subclasses would have to override this method accordingly to take the appropriate action.
 *
 * @param timer the timer used by the cluster being closed
 */
public void onClusterClose(Timer timer) { timer.stop(); } }
private void reconnect(final RedisConnection connection, final int attempts){ int timeout = 2 << attempts; if (bootstrap.config().group().isShuttingDown()) { return; } try { timer.newTimeout(new TimerTask() { @Override public void run(Timeout timeout) throws Exception { tryReconnect(connection, Math.min(BACKOFF_CAP, attempts + 1)); } }, timeout, TimeUnit.MILLISECONDS); } catch (IllegalStateException e) { // skip } }
// NOTE(review): fragment — stops the timer; the enclosing method is not visible in this view.
timer.stop();
private void reconnect(final RedisConnection connection, final int attempts){ int timeout = 2 << attempts; if (bootstrap.config().group().isShuttingDown()) { return; } try { timer.newTimeout(new TimerTask() { @Override public void run(Timeout timeout) throws Exception { tryReconnect(connection, Math.min(BACKOFF_CAP, attempts + 1)); } }, timeout, TimeUnit.MILLISECONDS); } catch (IllegalStateException e) { // skip } }
/**
 * Releases the timer if it was ever created; a null timer means there is
 * nothing to clean up.
 */
@Override
public void destroy() {
    if (timer == null) {
        return;
    }
    timer.stop();
}
/**
 * Sends a PING to the peer and schedules a watchdog for the reply.
 * When the watchdog fires: if no blocking command is in flight and the PING
 * has not completed successfully, the channel is closed; otherwise another
 * PING round is started.
 */
protected void sendPing(final ChannelHandlerContext ctx) {
    final RedisConnection connection = RedisConnection.getFrom(ctx.channel());
    final RFuture<String> future = connection.async(StringCodec.INSTANCE, RedisCommands.PING);
    TimerTask pingWatchdog = new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            CommandData<?, ?> commandData = connection.getCurrentCommand();
            // Evaluation order matters: future.cancel(false) both tests for and
            // claims an unanswered PING in a single side-effecting call.
            if ((commandData == null || !commandData.isBlockingCommand()) && (future.cancel(false) || !future.isSuccess())) {
                ctx.channel().close();
                log.debug("channel: {} closed due to PING response timeout set in {} ms", ctx.channel(), config.getPingConnectionInterval());
            } else {
                sendPing(ctx); // healthy (or blocked) — keep pinging
            }
        }
    };
    config.getTimer().newTimeout(pingWatchdog, config.getPingConnectionInterval(), TimeUnit.MILLISECONDS);
}
/** Stops the timer when one exists; safe to call when the timer was never initialized. */
@Override
public void destroy() {
    if (timer != null) {
        timer.stop();
    }
}
/**
 * Issues an async PING and arms a timer that checks the outcome after the
 * configured ping interval: an unanswered PING (with no blocking command in
 * flight) closes the channel; otherwise the next PING round is scheduled.
 */
protected void sendPing(final ChannelHandlerContext ctx) {
    final RedisConnection connection = RedisConnection.getFrom(ctx.channel());
    final RFuture<String> future = connection.async(StringCodec.INSTANCE, RedisCommands.PING);
    config.getTimer().newTimeout(new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            CommandData<?, ?> commandData = connection.getCurrentCommand();
            // A blocking command legitimately delays the PING reply, so never
            // kill the channel while one is in flight.
            boolean blockingInFlight = commandData != null && commandData.isBlockingCommand();
            // future.cancel(false) is deliberately evaluated first: it atomically
            // claims a still-pending PING (side effect), falling back to isSuccess().
            if (!blockingInFlight && (future.cancel(false) || !future.isSuccess())) {
                ctx.channel().close();
                log.debug("channel: {} closed due to PING response timeout set in {} ms", ctx.channel(), config.getPingConnectionInterval());
            } else {
                sendPing(ctx);
            }
        }
    }, config.getPingConnectionInterval(), TimeUnit.MILLISECONDS);
}
/**
 * Stops the delegate timer.
 *
 * @return the timeouts that were still pending when the timer stopped
 */
@Override
public Set<Timeout> stop() {
    return timer.stop();
}