public boolean deserializer(KryoTupleDeserializer deserializer, boolean forceConsume) { //LOG.debug("start Deserializer of task, {}", taskId); boolean isIdling = true; DisruptorQueue exeQueue = innerTaskTransfer.get(taskId); if (!taskStatus.isShutdown()) { if ((deserializeQueue.population() > 0 && exeQueue.pctFull() < 1.0) || forceConsume) { try { List<Object> objects = deserializeQueue.retreiveAvailableBatch(); for (Object object : objects) { deserialize(deserializer, (byte[]) object, exeQueue); } isIdling = false; } catch (InterruptedException e) { LOG.error("InterruptedException " + e.getCause()); return true; } catch (TimeoutException e) { return true; } catch (Throwable e) { if (Utils.exceptionCauseIsInstanceOf(KryoException.class, e)) { throw new RuntimeException(e); } else if (!taskStatus.isShutdown()) { LOG.error("Unknown exception ", e); } } } } else { task.unregisterDeserializeQueue(); } return isIdling; }
/**
 * Publishes a tuple onto the serialize queue for later serialization.
 *
 * NOTE(review): the {@code taskId} parameter is ignored here — presumably
 * kept for interface compatibility with other transfer implementations;
 * confirm against the declaring interface.
 *
 * @param taskId target task id (unused in this implementation)
 * @param tuple  tuple to enqueue for serialization
 */
public void push(int taskId, TupleExt tuple) {
    serializeQueue.publish(tuple);
}
/**
 * Consumes pending events: the control queue is always drained first
 * (blocking until available), then the execute queue is drained without
 * blocking. The ordering matters — control messages must not be starved
 * by data traffic.
 */
@Override
public void consumeExecuteQueue() {
    // Consume control queue first, and then async consume execute queue
    controlQueue.consumeBatchWhenAvailable(this);
    exeQueue.consumeBatch(this);
}
}
/**
 * Drains any pending events from the control queue first and then the
 * execute queue. When both queues were empty, sleeps 1 ms so the polling
 * loop does not busy-spin.
 *
 * @param handler event handler that receives the consumed events
 */
protected void consumeBatch(EventHandler<Object> handler) {
    boolean drainedAny = false;
    if (controlQueue.population() > 0) {
        controlQueue.consumeBatch(handler);
        drainedAny = true;
    }
    if (exeQueue.population() > 0) {
        exeQueue.consumeBatch(handler);
        drainedAny = true;
    }
    if (!drainedAny) {
        JStormUtils.sleepMs(1);
    }
}
}
/**
 * Server-side flow control for incoming messages of one task.
 *
 * While any remote client is under flow control for this task, new messages
 * are cached instead of published; otherwise they are published directly and
 * flow control is initiated once the queue crosses the high watermark.
 *
 * @param channel the channel the message arrived on
 * @param queue   target disruptor queue for this task
 * @param taskId  destination task id
 * @param message raw serialized message bytes
 */
public void flowCtrl(Channel channel, DisruptorQueue queue, int taskId, byte[] message) {
    boolean initFlowCtrl = false;
    final Set<String> remoteAddrs = flowCtrlClients.get(taskId);
    // Lock the per-task address set: the publish/cache decision and the
    // membership update must be atomic with respect to other channels.
    synchronized (remoteAddrs) {
        if (remoteAddrs.size() > 0) {
            // Already under flow control: buffer the message in the cache.
            queue.publishCache(message);
            // add() returns true only for a client not yet throttled, so the
            // flow-control request is sent at most once per remote address.
            if (remoteAddrs.add(channel.getRemoteAddress().toString()))
                JStormUtils.sendFlowControlRequest(channel, taskId, 1);
        } else {
            queue.publish(message);
            // Queue crossed the high watermark: start throttling this client.
            if (queue.pctFull() > highMark) {
                remoteAddrs.add(channel.getRemoteAddress().toString());
                JStormUtils.sendFlowControlRequest(channel, taskId, 1);
                initFlowCtrl = true;
            }
        }
    }
    // Callback registration happens outside the lock to keep the critical
    // section short; it releases flow control once the queue drains.
    if (initFlowCtrl)
        queue.publishCallback(new BackpressureCallback(this, taskId));
}
public void serializer(KryoTupleSerializer serializer) { LOG.debug("start serializer of task: {}", taskId); if (!AsyncLoopRunnable.getShutdown().get()) { //note: sleep 1ms to reduce cpu usage when serializeQueue is empty if (serializeQueue.population() == 0) { Utils.sleep(1); return; } try { List<Object> objects = serializeQueue.retreiveAvailableBatch(); for (Object object : objects) { if (object == null) { continue; } serialize(serializer, object); } } catch (InterruptedException e) { LOG.error("InterruptedException " + e.getCause()); } catch (TimeoutException ignored) { } catch (AlertException e) { LOG.error(e.getMessage(), e); throw new RuntimeException(e); } } }
// Disruptor batch-wait timeout in milliseconds for the transfer control
// queue; falls back to 10 ms when not configured.
long timeout = JStormUtils.parseLong(stormConf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT), 10);
WaitStrategy waitStrategy = new TimeoutBlockingWaitStrategy(timeout, TimeUnit.MILLISECONDS);
// Multi-producer queue shared by all tasks of this worker.
// NOTE(review): trailing args (false, 0, 0) presumably disable batch mode
// and low/high watermarks — confirm against DisruptorQueue.mkInstance.
this.transferCtrlQueue = DisruptorQueue.mkInstance(
        "TotalTransfer", ProducerType.MULTI, queueSize, waitStrategy, false, 0, 0);
/**
 * Returns how full the queue currently is, as a fraction in [0, 1]:
 * population() / capacity().
 */
public float pctFull() {
    // Widen to float before dividing to avoid integer division.
    float occupied = population();
    return occupied / capacity();
}
@Override public Object getState() { Map state = new HashMap<String, Object>(); // get readPos then writePos so it's never an under-estimate long rp = readPos(); long wp = writePos(); state.put("capacity", capacity()); state.put("population", wp - rp); state.put("write_pos", wp); state.put("read_pos", rp); return state; }
/**
 * Consumes one batch from the execute queue. Exceptions raised while the
 * task is shutting down are expected and dropped; otherwise they are logged.
 */
private void executeEvent() {
    try {
        exeQueue.consumeBatch(this);
    } catch (Exception ex) {
        if (taskStatus.isShutdown()) {
            return; // shutdown race — ignore
        }
        LOG.error("unknown exception:", ex);
    }
}
// Toggle sleep-vs-spin behaviour for all disruptor queues in this worker.
DisruptorQueue.setUseSleep(disruptorUseSleep);
LOG.info("Disruptor use sleep:" + disruptorUseSleep);
// Blocking-wait timeout in milliseconds shared by both queues; default 10 ms.
long timeout = JStormUtils.parseLong(stormConf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT), 10);
WaitStrategy waitStrategy = new TimeoutBlockingWaitStrategy(timeout, TimeUnit.MILLISECONDS);
// NOTE(review): trailing args (false, 0, 0) presumably disable batch mode
// and low/high watermarks — confirm against DisruptorQueue.mkInstance.
this.transferCtrlQueue = DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI, queueSize, waitStrategy, false, 0, 0);
this.sendingQueue = DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI, buffer_size, waitStrategy, false, 0, 0);
/**
 * Consumer loop: repeatedly drains the queue, blocking inside
 * consumeBatchWhenAvailable while it is empty, until the shared shutdown
 * flag is set.
 */
@Override
public void run() {
    LOG.info("Successfully start thread " + idStr);
    //queue.consumerStarted();
    while (!shutdown.get()) {
        queue.consumeBatchWhenAvailable(this);
    }
    LOG.info("Successfully exit thread " + idStr);
}
/**
 * Serialization loop: blocks for batches on the serialize queue and
 * processes them until the shared shutdown flag is set.
 */
@Override
public void run() {
    while (!shutdown.get()) {
        serializeQueue.multiConsumeBatchWhenAvailable(this);
    }
}
/**
 * Marks the consumer as started and flushes cached entries into the queue.
 * Only the first call has any effect; later calls are no-ops.
 */
public void consumerStarted() {
    if (consumerStartedFlag) {
        return; // already started — nothing to do
    }
    consumerStartedFlag = true;
    flushCache();
}
/**
 * Consumes every event published up to the barrier's current cursor,
 * delegating each one to the given handler. Does not block waiting for
 * new events.
 *
 * @param handler event handler that receives the consumed events
 */
public void consumeBatch(EventHandler<Object> handler) {
    consumeBatchToCursor(_barrier.getCursor(), handler);
}
private void flowCtrl(Channel channel, String addr, DisruptorQueue queue, int taskId, byte[] message) { boolean isFlowCtrl = false; boolean isInitFlowCtrl = false; if (queue.pctFull() > lowMark || queue.cacheSize() > 0) { HashSet<String> remoteAddrs = remoteClientsUnderFlowCtrl.get(taskId); synchronized (remoteAddrs) { if (remoteAddrs.isEmpty()) { if (queue.pctFull() >= highMark) { remoteAddrs.add(addr); isInitFlowCtrl = true; queue.publishCache(message); if (isInitFlowCtrl) { queue.publishCallback(new BackpressureCallback(allChannels, queue, lowMark, taskId, remoteClientsUnderFlowCtrl)); queue.publish(message);
/**
 * Deserializes a raw message and publishes the resulting tuple to the
 * target execute queue, applying high/low watermark backpressure when
 * enabled. Null tuples (failed deserialization) are dropped silently.
 *
 * @param deserializer kryo deserializer for this task
 * @param serMsg       serialized tuple bytes
 * @param queue        target execute queue
 */
public void deserializeTuple(KryoTupleDeserializer deserializer, byte[] serMsg, DisruptorQueue queue) {
    Tuple tuple = deserializer.deserialize(serMsg);
    if (tuple != null) {
        if (JStormDebugger.isDebugRecv(tuple.getMessageId())) {
            LOG.info(idStr + " receive " + tuple.toString());
        }
        //queue.publish(tuple);
        if (isBackpressureEnable) {
            if (backpressureStatus) {
                // Under backpressure: spin-wait until the queue drains below
                // the low watermark, then publish and disarm backpressure.
                while (queue.pctFull() > lowMark) {
                    JStormUtils.sleepMs(1);
                }
                queue.publish(tuple);
                backpressureStatus = false;
            } else {
                queue.publish(tuple);
                // Crossing the high watermark arms backpressure for the
                // next incoming tuple.
                if (queue.pctFull() > highMark) {
                    backpressureStatus = true;
                }
            }
        } else {
            queue.publish(tuple);
        }
    }
}
public void serializer(KryoTupleSerializer serializer) { LOG.debug("start Serializer of task, {}", taskId); if (!AsyncLoopRunnable.getShutdown().get()) { //note: avoid to cpu idle when serializeQueue is empty if (serializeQueue.population() == 0){ Utils.sleep(1); return; } try { List<Object> objects = serializeQueue.retreiveAvailableBatch(); for (Object object : objects) { if (object == null) { continue; } serialize(serializer, object); } } catch (InterruptedException e) { LOG.error("InterruptedException " + e.getCause()); return; } catch (TimeoutException e) { return; }catch (AlertException e) { LOG.error(e.getMessage(), e); throw new RuntimeException(e); } } }