// Builds the spec's identity: a (component, stream) pair wrapped in a GlobalStreamId.
// Protected: instances are created via the enclosing class's factory/subclasses.
protected IdStreamSpec(String component, String stream) {
    _id = new GlobalStreamId(component, stream);
}
} // NOTE(review): closes an enclosing class whose header is outside this view.
/** Returns a new GlobalStreamId that is a field-by-field copy of this one. */
public GlobalStreamId deepCopy() {
    GlobalStreamId copy = new GlobalStreamId(this);
    return copy;
}
// Builds the spec's identity: a (component, stream) pair wrapped in a GlobalStreamId.
// Protected: instances are created via the enclosing class's factory/subclasses.
protected IdStreamSpec(String component, String stream) {
    _id = new GlobalStreamId(component, stream);
}
} // NOTE(review): closes an enclosing class whose header is outside this view.
/** Returns a new GlobalStreamId that is a field-by-field copy of this one. */
public GlobalStreamId deepCopy() {
    GlobalStreamId copy = new GlobalStreamId(this);
    return copy;
}
/**
 * Records the batch group of every stream emitted by the given component.
 * Each (stream -> batchGroup) entry is keyed into {@code _batchIds} under the
 * stream's global id, i.e. (component, stream).
 */
private void markBatchGroups(String component, Map<String, String> batchGroups) {
    for (Map.Entry<String, String> group : batchGroups.entrySet()) {
        GlobalStreamId streamId = new GlobalStreamId(component, group.getKey());
        _batchIds.put(streamId, group.getValue());
    }
}
/**
 * Subscribes the bolt being declared ({@code _boltId}) to the given upstream
 * (component, stream) with the supplied grouping, then returns this declarer
 * so calls can be chained.
 */
protected BoltDeclarer grouping(String componentId, String streamId, Grouping grouping) {
    GlobalStreamId source = new GlobalStreamId(componentId, streamId);
    _commons.get(_boltId).put_to_inputs(source, grouping);
    return this;
}
/** Returns the (source component, stream) pair this tuple was emitted on. */
public GlobalStreamId getSourceGlobalStreamid() {
    String sourceComponent = getSourceComponent();
    return new GlobalStreamId(sourceComponent, streamId);
}
/**
 * Returns the global stream id — the (source component, stream) pair —
 * that this tuple was emitted on.
 */
public GlobalStreamId getSourceGlobalStreamid() {
    String sourceComponent = getSourceComponent();
    return new GlobalStreamId(sourceComponent, streamId);
}
/**
 * Collects the global stream ids the given bolt subscribes to.
 *
 * @param id component id of the bolt (must exist in {@code _bolts})
 * @return one GlobalStreamId per declared input of the bolt, in declaration order
 */
List<GlobalStreamId> getBoltSubscriptionStreams(String id) {
    // Fix: was a raw `new ArrayList()` — use the parameterized diamond form.
    List<GlobalStreamId> ret = new ArrayList<>();
    Component c = _bolts.get(id);
    for (InputDeclaration d : c.declarations) {
        ret.add(new GlobalStreamId(d.getComponent(), d.getStream()));
    }
    return ret;
}
/**
 * Wires the upstream (component, stream) into the inputs of the bolt being
 * declared ({@code _boltId}) using the supplied grouping; returns this
 * declarer for chaining.
 */
private BoltDeclarer grouping(String componentId, String streamId, Grouping grouping) {
    GlobalStreamId source = new GlobalStreamId(componentId, streamId);
    _commons.get(_boltId).put_to_inputs(source, grouping);
    return this;
}
Map<GlobalStreamId, String> fleshOutStreamBatchIds(boolean includeCommitStream) { Map<GlobalStreamId, String> ret = new HashMap<>(_batchIds); Set<String> allBatches = new HashSet(_batchIds.values()); for(String b: allBatches) { ret.put(new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.BATCH_STREAM_ID), b); if(includeCommitStream) { ret.put(new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID), b); } // DO NOT include the success stream as part of the batch. it should not trigger coordination tuples, // and is just a metadata tuple to assist in cleanup, should not trigger batch tracking } for(String id: _spouts.keySet()) { TransactionalSpoutComponent c = _spouts.get(id); if(c.batchGroupId!=null) { ret.put(new GlobalStreamId(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID), c.batchGroupId); } } //this takes care of setting up coord streams for spouts and bolts for(GlobalStreamId s: _batchIds.keySet()) { String b = _batchIds.get(s); ret.put(new GlobalStreamId(s.get_componentId(), TridentBoltExecutor.COORD_STREAM(b)), b); } return ret; }
/**
 * For bolts that has incoming streams from spouts (the root bolts),
 * add checkpoint stream from checkpoint spout to its input. For other bolts,
 * add checkpoint stream from the previous bolt to its input.
 */
private void addCheckPointInputs(ComponentCommon component) {
    Set<GlobalStreamId> checkPointInputs = new HashSet<>();
    for (GlobalStreamId inputStream : component.get_inputs().keySet()) {
        String sourceId = inputStream.get_componentId();
        if (_spouts.containsKey(sourceId)) {
            // Root bolt: subscribe directly to the dedicated checkpoint spout's stream.
            checkPointInputs.add(new GlobalStreamId(CheckpointSpout.CHECKPOINT_COMPONENT_ID, CheckpointSpout.CHECKPOINT_STREAM_ID));
        } else {
            // Non-root bolt: subscribe to the checkpoint stream of each upstream bolt.
            checkPointInputs.add(new GlobalStreamId(sourceId, CheckpointSpout.CHECKPOINT_STREAM_ID));
        }
    }
    // ALL grouping so every task of this bolt sees every checkpoint tuple.
    for (GlobalStreamId streamId : checkPointInputs) {
        component.put_to_inputs(streamId, Grouping.all(new NullStruct()));
    }
}

// NOTE(review): the next definition is cut off at the end of this view; left untouched.
private ComponentCommon getComponentCommon(String id, IComponent component) {
// NOTE(review): this span interleaves fragments from (at least) two different
// loop bodies — `String id` is declared twice, so it cannot compile as-is.
// Fragment 1 (spout loop): the stream the acker uses to learn of new tuples.
String id = spout.getKey();
GlobalStreamId stream = new GlobalStreamId(id, ACKER_INIT_STREAM_ID);
// Fragment 2 (bolt loop): the ack/fail streams each bolt feeds back to the
// acker, fields-grouped on "id" so one acker task owns a given tuple tree.
String id = bolt.getKey();
GlobalStreamId streamAck = new GlobalStreamId(id, ACKER_ACK_STREAM_ID);
Grouping groupAck = Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
GlobalStreamId streamFail = new GlobalStreamId(id, ACKER_FAIL_STREAM_ID);
Grouping groupFail = Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
// Fragment of a (presumably Thrift-generated) copy constructor copying one
// inputs-map entry: the key is cloned via GlobalStreamId's copy constructor.
// NOTE(review): surrounding loop/map not visible here — left untouched.
Grouping other_element_value = other_element.getValue();
GlobalStreamId __this__inputs_copy_key = new GlobalStreamId(other_element_key);
/**
 * Subscribes the transactional bolt being declared to an upstream stream,
 * additionally wiring the barrier-snapshot stream needed by transaction
 * topologies and, for KV-state bolts, enabling key-range field grouping on
 * the upstream component.
 */
protected BoltDeclarer grouping(String componentId, String streamId, Grouping grouping) {
    // Check if bolt is KvStateBolt, if so, enable the key range hash in upstream component
    TransactionBolt bolt = (TransactionBolt) _bolts.get(_boltId);
    if (bolt.getBoltExecutor() instanceof KvStatefulBoltExecutor) {
        // Merge ENABLE_KEY_RANGE_FIELD_GROUP=true into the upstream component's JSON conf.
        ComponentCommon common = _commons.get(componentId);
        Map<String, Object> conf = new HashMap<>();
        conf.put(ConfigExtension.ENABLE_KEY_RANGE_FIELD_GROUP, true);
        String currConf = common.get_json_conf();
        common.set_json_conf(JStormUtils.mergeIntoJson(JStormUtils.parseJson(currConf), conf));
    }
    // Add barrier snapshot stream for transaction topology
    // (only once per upstream->downstream pair, tracked in upToDownstreamComponentsMap).
    Set<String> downstreamBolts = upToDownstreamComponentsMap.get(componentId);
    if (downstreamBolts != null && !downstreamBolts.contains(_boltId)) {
        downstreamBolts.add(_boltId);
        _commons.get(_boltId).put_to_inputs(new GlobalStreamId(componentId, TransactionCommon.BARRIER_STREAM_ID), Grouping.all(new NullStruct()));
    }
    _commons.get(_boltId).put_to_inputs(new GlobalStreamId(componentId, streamId), grouping);
    // Delegate to the parent declarer so base bookkeeping still happens.
    return super.grouping(componentId, streamId, grouping);
}
} // NOTE(review): closes an enclosing class whose header is outside this view.
/**
 * Adds a watermark input stream from every source component of a windowed
 * bolt, ALL-grouped so each task of the bolt receives every watermark.
 * No-op for non-windowed bolts.
 */
private void maybeAddWatermarkInputs(ComponentCommon common, IRichBolt bolt) {
    if (!(bolt instanceof WindowedBoltExecutor)) {
        return;
    }
    // De-duplicate source component ids across all declared inputs.
    Set<String> sourceComponents = new HashSet<>();
    for (GlobalStreamId input : common.get_inputs().keySet()) {
        sourceComponents.add(input.get_componentId());
    }
    for (String source : sourceComponents) {
        common.put_to_inputs(
                new GlobalStreamId(source, Common.WATERMARK_STREAM_ID),
                Grouping.all(new NullStruct()));
    }
}
/**
 * Buffers the tuple under its join key until one tuple per source stream has
 * arrived, then emits the joined output fields anchored on all parts and
 * acks them. Throws if the same stream contributes twice for one key.
 */
@Override
public void execute(Tuple tuple) {
    List<Object> joinKey = tuple.select(_idFields);
    GlobalStreamId sourceStream =
            new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
    // Lazily create the per-key buffer. (_pending may be a cache type, so we
    // stick to containsKey/put/get rather than Map default methods.)
    if (!_pending.containsKey(joinKey)) {
        _pending.put(joinKey, new HashMap<GlobalStreamId, Tuple>());
    }
    Map<GlobalStreamId, Tuple> parts = _pending.get(joinKey);
    if (parts.containsKey(sourceStream)) {
        throw new RuntimeException("Received same side of single join twice");
    }
    parts.put(sourceStream, tuple);
    if (parts.size() != _numSources) {
        return; // still waiting on at least one side of the join
    }
    _pending.remove(joinKey);
    List<Object> joinResult = new ArrayList<Object>();
    for (String outField : _outFields) {
        // Each output field was recorded as coming from exactly one stream.
        GlobalStreamId owner = _fieldLocations.get(outField);
        joinResult.add(parts.get(owner).getValueByField(outField));
    }
    // Anchor on every contributing tuple, then ack them all.
    _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);
    for (Tuple part : parts.values()) {
        _collector.ack(part);
    }
}
/**
 * Bumps the "failed" counter for the given source (component, stream).
 * Note: {@code latency_ms} is accepted for signature parity with the acked
 * variant but is not recorded for failures.
 */
public static void bolt_failed_tuple(BoltTaskStatsRolling stats, String component, String stream, Long latency_ms) {
    GlobalStreamId streamKey = new GlobalStreamId(component, stream);
    update_task_stat(stats, StormUtils.mk_arr("failed"), streamKey, stats_rate(stats));
}
/**
 * Bumps the "acked" counter and records the processing latency for the
 * given source (component, stream).
 */
public static void bolt_acked_tuple(BoltTaskStatsRolling stats, String component, String stream, Long latency_ms) {
    GlobalStreamId streamKey = new GlobalStreamId(component, stream);
    update_task_stat(stats, StormUtils.mk_arr("acked"), streamKey, stats_rate(stats));
    update_task_stat(stats, StormUtils.mk_arr("process_latencies"), streamKey, latency_ms);
}
// Fragment wiring a component's ComponentCommon into the acker protocol.
// NOTE(review): the enclosing method is not visible here — left untouched.
// Declare the init stream this component emits toward the acker.
common.put_to_streams(ACKER_INIT_STREAM_ID, Thrift.outputFields(initList));
// Subscribe to the acker's ack stream; direct grouping so the acker can
// target the exact task that emitted the root tuple.
GlobalStreamId ack_ack = new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_ACK_STREAM_ID);
common.put_to_inputs(ack_ack, Thrift.mkDirectGrouping());
// Likewise for the acker's fail stream.
GlobalStreamId ack_fail = new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_FAIL_STREAM_ID);
common.put_to_inputs(ack_fail, Thrift.mkDirectGrouping());