batchValues.add(pair); TupleImplExt batchTuple = new TupleImplExt(topology_context, batchValues, task_id, outStreamId, null); batchTuple.setTargetTaskId(t); batchTuple.setBatchTuple(true); if (batchId != -1) { batchTuple.setBatchId(batchId);
/**
 * Copy-style constructor: builds a tuple carrying the given {@code values} and message
 * {@code id}, while inheriting source task, source stream, target task id, creation
 * timestamp and batch id from the template {@code tuple}.
 */
public TupleImplExt(GeneralTopologyContext context, List<Object> values, MessageId id, TupleImplExt tuple) { super(context, values, tuple.getSourceTask(), tuple.getSourceStreamId(), id); this.targetTaskId = tuple.getTargetTaskId(); this.creationTimeStamp = tuple.getCreationTimeStamp(); this.batchId = tuple.getBatchId(); }
/**
 * Emits {@code values} on {@code stream} to every task resolved by the grouping,
 * tagging each tuple with its target task id before handing it to the transfer function.
 * Does nothing when the grouping resolves to no tasks.
 */
public static void send(TopologyContext topologyContext, TaskSendTargets taskTargets, TaskTransfer transferFn,
                        String stream, List<Object> values) {
    List<Integer> targetTasks = taskTargets.get(stream, values, null, values.get(0));
    if (targetTasks.size() == 0) {
        return;
    }

    Integer sourceTaskId = topologyContext.getThisTaskId();
    for (Integer targetTask : targetTasks) {
        TupleImplExt tuple = new TupleImplExt(topologyContext, values, sourceTaskId, stream);
        tuple.setTargetTaskId(targetTask);
        transferFn.transfer(tuple);
    }
}
}
// Append the (MessageId, values) pair to the accumulated batch, wrap the batch values
// in a batch-flagged tuple addressed to target task t, and hand it to the transfer fn.
batchValues.add(pair); TupleImplExt batchTuple = new TupleImplExt(topology_context, batchValues, task_id, outStreamId, null); batchTuple.setTargetTaskId(t); batchTuple.setBatchTuple(true); transfer_fn.transfer(batchTuple);
@Override
public void updateObject() {
    // Rebuild the cached system tick tuple with the current wall-clock seconds.
    Values tickValues = new Values(TimeUtils.current_time_secs());
    this.object = new TupleImplExt(
            topologyContext, tickValues, (int) Constants.SYSTEM_TASK_ID, Constants.SYSTEM_TICK_STREAM_ID);
}
@Override
public void fail(Tuple input) {
    long batchId = ((TupleImplExt) input).getBatchId();
    if (batchId <= 0) {
        // A tuple without a valid batch id cannot be mapped back to a pending batch.
        LOG.warn("It's not allowed to fail a tuple with invalid batchId={}", batchId);
        return;
    }
    bolt.fail(batchId);
}
/**
 * Emits a control message on {@code out_stream_id} to the resolved target tasks,
 * routing each tuple through the control-transfer path.
 *
 * @return the resolved target task ids, or null when resolution threw
 *         (note: sendMsg normalizes this case to an empty list instead)
 */
protected List<Integer> sendCtrlMsg(String out_stream_id, List<Object> values, Collection<Tuple> anchors,
                                    Integer out_task_id) {
    final long start = emitTimer.getTime();
    List<Integer> targetTasks = null;
    try {
        targetTasks = (out_task_id != null)
                ? sendTargets.get(out_task_id, out_stream_id, values, anchors, null)
                : sendTargets.get(out_stream_id, values, anchors, null);
        tryRotate();
        for (Integer targetTask : targetTasks) {
            // A fresh message id is derived from the anchors for every target task.
            MessageId msgId = getMessageId(anchors);
            TupleImplExt tuple = new TupleImplExt(topologyContext, values, taskId, out_stream_id, msgId);
            tuple.setTargetTaskId(targetTask);
            transferCtr(tuple);
        }
    } catch (Exception e) {
        LOG.error("bolt emit error:", e);
    } finally {
        emitTimer.updateTime(start);
    }
    return targetTasks;
}
// Wrap the accumulated batch values in a batch-flagged tuple addressed to target
// task t and hand it to the task transfer queue.
TupleImplExt batchTuple = new TupleImplExt(topologyContext, batchValues, task_id, outStreamId, null); batchTuple.setTargetTaskId(t); batchTuple.setBatchTuple(true); taskTransfer.transfer(batchTuple);
/**
 * Handles a batched tuple. Tuples arriving on the topology-master control or
 * metrics-response streams are unpacked pair-by-pair into standalone tuples and
 * processed individually; everything else goes straight to the bolt.
 */
private void processTupleBatchEvent(Tuple tuple) {
    try {
        String sourceStream = tuple.getSourceStreamId();
        boolean isTopologyMasterStream =
                (!isSystemBolt && sourceStream.equals(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID))
                        || sourceStream.equals(Common.TOPOLOGY_MASTER_REGISTER_METRICS_RESP_STREAM_ID);
        if (isTopologyMasterStream) {
            if (tuple.getValues().get(0) instanceof Pair) {
                // Each element is a (MessageId, values) pair of the batch.
                for (Object element : tuple.getValues()) {
                    Pair<MessageId, List<Object>> pair = (Pair<MessageId, List<Object>>) element;
                    TupleImplExt unpacked =
                            new TupleImplExt(sysTopologyCtx, pair.getSecond(), pair.getFirst(), (TupleImplExt) tuple);
                    processTupleEvent(unpacked);
                }
            }
        } else {
            bolt.execute(tuple);
        }
    } catch (Throwable e) {
        error = e;
        LOG.error("bolt execute error ", e);
        reportError.report(e);
    }
}
@Override
public void ack(Tuple input) {
    long batchId = ((TupleImplExt) input).getBatchId();
    Pair<Long, Integer> pendingBatch = tracker.getPendingBatch(batchId, input.getSourceStreamId());
    if (pendingBatch != null) {
        long rootId = getRootId(input);
        if (rootId != 0) {
            // Fold this tuple's root id into the batch's XOR checksum.
            pendingBatch.setFirst(JStormUtils.bit_xor(pendingBatch.getFirst(), rootId));
        }
    }
    delegate.ack(input);
}
TupleImplExt tuple = new TupleImplExt(_context, values, taskId, streamName, id); tuple.setBatchTuple(isBatchTuple); tuple.setTargetTaskId(targetTaskId); tuple.setCreationTimeStamp(timeStamp); if (_isTransactionTuple) { tuple.setBatchId(batchId);
/**
 * Copy-style constructor: builds a tuple carrying the given {@code values} and message
 * {@code id}, inheriting source task, source stream, target task id and creation
 * timestamp from the template {@code tuple}.
 *
 * NOTE(review): unlike the otherwise-identical overload elsewhere in this codebase that
 * also copies {@code batchId} from the template tuple, this version does not — confirm
 * whether dropping the batch id here is intentional or an older revision.
 */
public TupleImplExt(GeneralTopologyContext context, List<Object> values, MessageId id, TupleImplExt tuple) { super(context, values, tuple.getSourceTask(), tuple.getSourceStreamId(), id); this.targetTaskId = tuple.getTargetTaskId(); this.creationTimeStamp = tuple.getCreationTimeStamp(); }
/**
 * Emits a message on {@code out_stream_id} to the resolved target tasks and, when a
 * callback is supplied, reports the emitted stream/tasks/values to it.
 *
 * @return the resolved target task ids; never null (normalized to an empty list on failure)
 */
public List<Integer> sendMsg(String out_stream_id, List<Object> values, Collection<Tuple> anchors,
                             Integer out_task_id, ICollectorCallback callback) {
    final long start = emitTimer.getTime();
    List<Integer> targetTasks = null;
    try {
        targetTasks = (out_task_id != null)
                ? sendTargets.get(out_task_id, out_stream_id, values, anchors, null)
                : sendTargets.get(out_stream_id, values, anchors, null);
        tryRotate();
        for (Integer targetTask : targetTasks) {
            // A fresh message id is derived from the anchors for every target task.
            MessageId msgId = getMessageId(anchors);
            TupleImplExt tuple = new TupleImplExt(topologyContext, values, taskId, out_stream_id, msgId);
            tuple.setTargetTaskId(targetTask);
            taskTransfer.transfer(tuple);
        }
    } catch (Exception e) {
        LOG.error("bolt emit error:", e);
    } finally {
        if (targetTasks == null) {
            targetTasks = new ArrayList<>();
        }
        if (callback != null) {
            callback.execute(out_stream_id, targetTasks, values);
        }
        emitTimer.updateTime(start);
    }
    return targetTasks;
}
// Sends a task heartbeat (uptime + executor status). The topology master publishes its
// heartbeat tuple directly onto its local queue; all other tasks route the heartbeat to
// the topology master through their spout/bolt collector's control-emit path, logging a
// warning if no collector has been initialized yet.
// NOTE(review): the spout path passes (stream, values, null) while the bolt path passes
// (stream, null, values) — presumably spout/bolt emitCtrl signatures order their
// parameters differently; confirm the argument order against both collector APIs.
@SuppressWarnings("unchecked") public void sendHbMsg() { if (componentId.equals(Common.TOPOLOGY_MASTER_COMPONENT_ID)) { Values values = new Values(uptime.uptime(), executorStatus.getStatus()); TupleExt tuple = new TupleImplExt(sysTopologyCtx, values, taskId, Common.TOPOLOGY_MASTER_HB_STREAM_ID); queue.publish(tuple); } else { // Send task heartbeat to topology master List values = JStormUtils.mk_list(uptime.uptime(), executorStatus.getStatus()); LOG.debug("Send TaskHeartbeat from task:{}, uptime:{}", taskId, uptime.uptime()); if (spoutCollector != null) { spoutCollector.emitCtrl(Common.TOPOLOGY_MASTER_HB_STREAM_ID, values, null); } else if (boltCollector != null) { boltCollector.emitCtrl(Common.TOPOLOGY_MASTER_HB_STREAM_ID, null, values); } else { LOG.warn("Failed to send heartbeat msg: OutputCollector has not been initialized!"); } } }
@Override
public void execute(Tuple input) {
    long rootId = getRootId(input);
    if (rootId != 0) {
        long batchId = ((TupleImplExt) input).getBatchId();
        String streamId = input.getSourceStreamId();
        // Lazily create the per-(batch, stream) tracking entry on first sight.
        Pair<Long, Integer> pendingBatch = batchXorTracker.getPendingBatch(batchId, streamId, true);
        if (pendingBatch == null) {
            pendingBatch = new Pair<>(0L, 0);
            batchXorTracker.putPendingBatch(batchId, streamId, pendingBatch);
        }
        // XOR in this tuple's root id and bump the batch's tuple count.
        pendingBatch.setFirst(JStormUtils.bit_xor(pendingBatch.getFirst(), rootId));
        pendingBatch.setSecond(pendingBatch.getSecond() + 1);
    }
    bolt.execute(input);
}
TupleImplExt batchTuple = new TupleImplExt(topologyContext, batchValues, taskId, outStreamId, null); batchTuple.setTargetTaskId(t); batchTuple.setBatchTuple(true); if (batchId != -1) { batchTuple.setBatchId(batchId);
// Build the control tuple for target task t with the anchor-derived message id and
// push it through the control-transfer path.
TupleImplExt tp = new TupleImplExt(topology_context, values, task_id, out_stream_id, msgId); tp.setTargetTaskId(t); transferCtr(tp);
for (Object value : values) { Pair<MessageId, List<Object>> val = (Pair<MessageId, List<Object>>) value; TupleImplExt tuple = new TupleImplExt( sysTopologyCtx, val.getSecond(), val.getFirst(), ((TupleImplExt) event)); runnable = processTupleEvent(tuple);
values = _kryo.deserializeFrom(input); TupleImplExt tuple = new TupleImplExt(_context, values, taskId, streamName, id); tuple.setBatchTuple(isBatchTuple); tuple.setTargetTaskId(targetTaskId); tuple.setCreationTimeStamp(timeStamp); return tuple; } catch (Exception e) {