void fireConnectionEstablished() { // The request onSuccess calls this method connectionRequest = null; // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = protonTransport.tick(now); if (deadline != 0) { long delay = deadline - now; LOG.trace("IdleTimeoutCheck being initiated, initial delay: {}", delay); nextIdleTimeoutCheck = serializer.schedule(new IdleTimeoutCheck(), delay, TimeUnit.MILLISECONDS); } ProviderListener listener = this.listener; if (listener != null) { listener.onConnectionEstablished(remoteURI); } }
// Completes connection setup and, when an idle-timeout was negotiated,
// kicks off the periodic IdleTimeoutCheck chain.
void fireConnectionEstablished() {
    // The request onSuccess calls this method
    connectionRequest = null;
    // Using nano time since it is not related to the wall clock, which may change
    long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    // tick() returns the absolute time (ms) of the next required idle check,
    // or 0 when no idle-timeout processing is needed.
    long deadline = protonTransport.tick(now);
    if (deadline != 0) {
        long delay = deadline - now;
        LOG.trace("IdleTimeoutCheck being initiated, initial delay: {}", delay);
        nextIdleTimeoutCheck = serializer.schedule(new IdleTimeoutCheck(), delay, TimeUnit.MILLISECONDS);
    }
    // Local copy before the null check — presumably to guard against the
    // field being cleared concurrently; verify against the field's writers.
    ProviderListener listener = this.listener;
    if (listener != null) {
        listener.onConnectionEstablished(remoteURI);
    }
}
private void initiateIdleTimeoutChecks() { // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = transport.tick(now); if (deadline != 0) { // timer treats 0 as error, ensure value is at least 1 as there was a deadline long delay = Math.max(deadline - now, 1); LOG.trace("IdleTimeoutCheck being initiated, initial delay: {0}", delay); idleTimeoutCheckTimerId = vertx.setTimer(delay, new IdleTimeoutCheck()); } }
// Wires the broker's inactivity monitor to the proton transport's
// idle-timeout deadline, if a monitor is configured.
private void configureInactivityMonitor() {
    AmqpInactivityMonitor monitor = amqpTransport.getInactivityMonitor();
    if (monitor == null) {
        return;
    }

    // If either end has idle timeout requirements then the tick method
    // will give us a deadline on the next time we need to tick() in order
    // to meet those obligations.
    // Using nano time since it is not related to the wall clock, which may change
    long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    long nextIdleCheck = protonTransport.tick(now);
    if (nextIdleCheck != 0) {
        // monitor treats <= 0 as no work, ensure value is at least 1 as there was a deadline
        long delay = Math.max(nextIdleCheck - now, 1);
        LOG.trace("Connection keep-alive processing starts in: {}", delay);
        monitor.startKeepAliveTask(delay);
    } else {
        LOG.trace("Connection does not require keep-alive processing");
    }
}
// NOTE(review): this trailing brace appears to close the enclosing class,
// which begins outside this chunk.
}
private void configureInactivityMonitor() { AmqpInactivityMonitor monitor = amqpTransport.getInactivityMonitor(); if (monitor == null) { return; } // If either end has idle timeout requirements then the tick method // will give us a deadline on the next time we need to tick() in order // to meet those obligations. // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long nextIdleCheck = protonTransport.tick(now); if (nextIdleCheck != 0) { // monitor treats <= 0 as no work, ensure value is at least 1 as there was a deadline long delay = Math.max(nextIdleCheck - now, 1); LOG.trace("Connection keep-alive processing starts in: {}", delay); monitor.startKeepAliveTask(delay); } else { LOG.trace("Connection does not require keep-alive processing"); } } }
// Anonymous task: drives pending transport work and advances idle-timeout
// accounting. The enclosing 'new Runnable() {' begins outside this chunk.
@Override
public void run() {
    final String methodName = "run";
    logger.entry(this, methodName);
    transport.process();
    // NOTE(review): currentTimeMillis() is not monotonic — a wall-clock jump
    // could skew idle-timeout handling; confirm whether nanoTime-based millis
    // would be safer here. The deadline returned by tick() is discarded —
    // presumably the caller runs this task on a fixed cadence; verify.
    transport.tick(System.currentTimeMillis());
    logger.exit(methodName);
}
};
/**
 * Advances the proton transport's idle-timeout processing.
 *
 * @param firstTick true on the initial invocation, where no inactivity
 *                  verdict should yet be drawn from the transport state
 * @return the next absolute tick deadline in milliseconds, or 0 when the
 *         connection is closed locally or an error occurred
 */
@Override
public long tick(boolean firstTick) {
    if (!firstTick) {
        try {
            if (connection.getLocalState() != EndpointState.CLOSED) {
                // Monotonic nano clock converted to millis; wall clock may change.
                long rescheduleAt = transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime()));
                if (transport.isClosed()) {
                    // Fixed message typo: "for to long" -> "for too long".
                    throw new IllegalStateException("Channel was inactive for too long");
                }
                return rescheduleAt;
            }
        } catch (Exception e) {
            // NOTE(review): the exception is swallowed here (no logging and the
            // ErrorCondition carries no detail) — consider logging 'e' or
            // populating the condition so the failure cause is not lost.
            transport.close();
            connection.setCondition(new ErrorCondition());
        }
        return 0;
    }
    return transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime()));
}
/**
 * Advances the proton transport's idle-timeout processing, flushing any
 * output it produced.
 *
 * @param firstTick true on the initial invocation, where no inactivity
 *                  verdict should yet be drawn from the transport state
 * @return the next absolute tick deadline in milliseconds, or 0 when the
 *         connection is closed locally or an error occurred
 */
public Long tick(boolean firstTick) {
    requireHandler();
    if (!firstTick) {
        try {
            if (connection.getLocalState() != EndpointState.CLOSED) {
                // Monotonic nano clock converted to millis; wall clock may change.
                long rescheduleAt = transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime()));
                if (transport.isClosed()) {
                    // Fixed message typo: "for to long" -> "for too long".
                    throw new IllegalStateException("Channel was inactive for too long");
                }
                return rescheduleAt;
            }
        } catch (Exception e) {
            log.warn(e.getMessage(), e);
            transport.close();
            connection.setCondition(new ErrorCondition());
        } finally {
            // Always push out whatever tick() queued (e.g. the close frame).
            flush();
        }
        return 0L;
    }
    return transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime()));
}
// Periodic idle-timeout check: ticks the transport, pumps output, and
// either reschedules itself or reports the dropped connection.
@Override
public void run() {
    boolean checkScheduled = false;
    if (connection.getLocalState() == EndpointState.ACTIVE) {
        // Using nano time since it is not related to the wall clock, which may change
        long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
        // tick() returns the next absolute deadline (0 = no further checks);
        // it may also close the transport, which is checked just below.
        long deadline = protonTransport.tick(now);
        boolean pumpSucceeded = pumpToProtonTransport();
        if (protonTransport.isClosed()) {
            LOG.info("IdleTimeoutCheck closed the transport due to the peer exceeding our requested idle-timeout.");
            // Only surface the error if the pump itself didn't already fail
            // (a failed pump presumably reports its own error; verify).
            if (pumpSucceeded) {
                fireProviderException(new IOException("Transport closed due to the peer exceeding our requested idle-timeout"));
            }
        } else {
            if (deadline != 0) {
                long delay = deadline - now;
                checkScheduled = true;
                LOG.trace("IdleTimeoutCheck rescheduling with delay: {}", delay);
                nextIdleTimeoutCheck = serializer.schedule(this, delay, TimeUnit.MILLISECONDS);
            }
        }
    } else {
        LOG.trace("IdleTimeoutCheck skipping check, connection is not active.");
    }
    if (!checkScheduled) {
        // Chain ends here; clear the handle so a fresh chain can be started.
        nextIdleTimeoutCheck = null;
        LOG.trace("IdleTimeoutCheck exiting");
    }
}
// NOTE(review): this trailing brace appears to close the enclosing
// IdleTimeoutCheck class, which begins outside this chunk.
}
try { if (connection.getLocalState() != EndpointState.CLOSED) { long rescheduleAt = transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime())); if (transport.isClosed()) { throw new IllegalStateException("Channel was inactive for to long"); return transport.tick(TimeUnit.NANOSECONDS.toMillis(System.nanoTime())); } finally { lock.unlock();
@Override public void run() { boolean checkScheduled = false; if (connection.getLocalState() == EndpointState.ACTIVE) { // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = protonTransport.tick(now); boolean pumpSucceeded = pumpToProtonTransport(); if (protonTransport.isClosed()) { LOG.info("IdleTimeoutCheck closed the transport due to the peer exceeding our requested idle-timeout."); if (pumpSucceeded) { fireProviderException(new IOException("Transport closed due to the peer exceeding our requested idle-timeout")); } } else { if (deadline != 0) { long delay = deadline - now; checkScheduled = true; LOG.trace("IdleTimeoutCheck rescheduling with delay: {}", delay); nextIdleTimeoutCheck = serializer.schedule(this, delay, TimeUnit.MILLISECONDS); } } } else { LOG.trace("IdleTimeoutCheck skipping check, connection is not active."); } if (!checkScheduled) { nextIdleTimeoutCheck = null; LOG.trace("IdleTimeoutCheck exiting"); } } }
// Ticks the selectable's transport at the reactor's current time and
// returns the resulting next-deadline value (0 when none is required).
private static long deadline(SelectableImpl selectable) {
    return selectable.getTransport().tick(selectable.getReactor().now());
}
@Override public long keepAlive() throws IOException { long rescheduleAt = 0l; LOG.trace("Performing connection:{} keep-alive processing", amqpTransport.getRemoteAddress()); if (protonConnection.getLocalState() != EndpointState.CLOSED) { // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = protonTransport.tick(now); pumpProtonToSocket(); if (protonTransport.isClosed()) { LOG.debug("Transport closed after inactivity check."); throw new InactivityIOException("Channel was inactive for too long"); } else { if(deadline != 0) { // caller treats 0 as no-work, ensure value is at least 1 as there was a deadline rescheduleAt = Math.max(deadline - now, 1); } } } LOG.trace("Connection:{} keep alive processing done, next update in {} milliseconds.", amqpTransport.getRemoteAddress(), rescheduleAt); return rescheduleAt; }
// Ticks the selectable's transport at the reactor's current time and
// returns the next deadline reported by tick() (0 when none is required).
private static long deadline(SelectableImpl selectable) {
    Reactor reactor = selectable.getReactor();
    Transport transport = selectable.getTransport();
    long deadline = transport.tick(reactor.now());
    return deadline;
}
@Override public long keepAlive() throws IOException { long rescheduleAt = 0l; LOG.trace("Performing connection:{} keep-alive processing", amqpTransport.getRemoteAddress()); if (protonConnection.getLocalState() != EndpointState.CLOSED) { // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = protonTransport.tick(now); pumpProtonToSocket(); if (protonTransport.isClosed()) { LOG.debug("Transport closed after inactivity check."); throw new InactivityIOException("Channel was inactive for too long"); } else { if(deadline != 0) { // caller treats 0 as no-work, ensure value is at least 1 as there was a deadline rescheduleAt = Math.max(deadline - now, 1); } } } LOG.trace("Connection:{} keep alive processing done, next update in {} milliseconds.", amqpTransport.getRemoteAddress(), rescheduleAt); return rescheduleAt; }
// Drains the transport's pending output bytes into an asynchronous channel
// write, then advances the transport's idle-timeout accounting.
private void writeToNetwork(EngineConnection engineConnection) {
    final String methodName = "writeToNetwork";
    logger.entry(this, methodName, engineConnection);
    if (engineConnection.transport.pending() > 0) {
        // head() exposes the transport's pending output; capture its size
        // before handing the buffer to the asynchronous write.
        ByteBuffer head = engineConnection.transport.head();
        int amount = head.remaining();
        engineConnection.channel.write(head, new NetworkWritePromiseImpl(this, amount, engineConnection));
        // pop() marks 'amount' bytes as consumed from the transport's output.
        // NOTE(review): popped before the async write completes — presumably
        // the channel copies or retains the buffer contents; verify.
        engineConnection.transport.pop(amount);
        // NOTE(review): currentTimeMillis() is not monotonic; a wall-clock
        // jump could skew idle-timeout accounting — confirm acceptable here.
        engineConnection.transport.tick(System.currentTimeMillis());
    }
    logger.exit(this, methodName);
}
// Scheduled idle-check task: ticks the transport, pumps output, and
// reschedules itself at the next deadline. The enclosing schedule(...) call
// and anonymous class header begin outside this chunk.
@Override
public void run() {
    try {
        if (getEndpoint().getLocalState() != EndpointState.CLOSED) {
            LOG.debug("Client performing next idle check");
            // Using nano time since it is not related to the wall clock, which may change
            long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
            long deadline = protonTransport.tick(now);
            pumpToProtonTransport();
            if (protonTransport.isClosed()) {
                LOG.debug("Transport closed after inactivity check.");
                throw new InactivityIOException("Channel was inactive for too long");
            } else {
                if (deadline != 0) {
                    // Reschedule this same task for the next tick deadline.
                    getScheduler().schedule(this, deadline - now, TimeUnit.MILLISECONDS);
                }
            }
        }
    } catch (Exception e) {
        try {
            transport.close();
        } catch (IOException e1) {
            // Ignored: already handling a failure; close is best-effort.
        }
        fireClientException(e);
    }
}
// Trailing tokens complete the enclosing schedule(...) invocation that
// starts outside this chunk (initial delay until the first keep-alive).
}, initialKeepAliveDeadline - initialNow, TimeUnit.MILLISECONDS);
@Override public void handle(Long event) { boolean checkScheduled = false; if (connection.getLocalState() == EndpointState.ACTIVE) { // Using nano time since it is not related to the wall clock, which may change long now = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); long deadline = transport.tick(now); flush(); if (transport.isClosed()) { LOG.info("IdleTimeoutCheck closed the transport due to the peer exceeding our requested idle-timeout."); disconnect(); } else { if (deadline != 0) { // timer treats 0 as error, ensure value is at least 1 as there was a deadline long delay = Math.max(deadline - now, 1); checkScheduled = true; LOG.trace("IdleTimeoutCheck rescheduling with delay: {0}", delay); idleTimeoutCheckTimerId = vertx.setTimer(delay, this); } } } else { LOG.trace("IdleTimeoutCheck skipping check, connection is not active."); } if (!checkScheduled) { idleTimeoutCheckTimerId = null; LOG.trace("IdleTimeoutCheck exiting"); } } }
// Reactor callback: ticks the selectable's transport, records the next
// deadline, and refreshes the selectable's read/write interest.
@Override
public void run(Selectable selectable) {
    final Reactor reactor = selectable.getReactor();
    final Transport transport = ((SelectableImpl) selectable).getTransport();

    // tick() reports the next absolute deadline (0 when none is required).
    selectable.setDeadline(transport.tick(reactor.now()));

    final int readCapacity = capacity(selectable);
    final int writePending = pending(selectable);
    selectable.setReading(readCapacity > 0);
    selectable.setWriting(writePending > 0);
    reactor.update(selectable);
}
};
// Reactor callback: ticks the selectable's transport, records the next
// deadline, and refreshes the selectable's read/write interest.
@Override
public void run(Selectable selectable) {
    Reactor reactor = selectable.getReactor();
    Transport transport = ((SelectableImpl)selectable).getTransport();
    // tick() reports the next absolute deadline (0 when none is required).
    long deadline = transport.tick(reactor.now());
    selectable.setDeadline(deadline);
    // c/p: current read capacity and pending output of the transport;
    // positive values mean the selectable should be polled for that event.
    int c = capacity(selectable);
    int p = pending(selectable);
    selectable.setReading(c > 0);
    selectable.setWriting(p > 0);
    reactor.update(selectable);
}
};