public BoltDeclarer setBolt(String id, IBasicBolt bolt, int parallel) {
    CoordinatedBolt coordinatedBolt = new CoordinatedBolt(bolt);
    BoltDeclarer boltDeclarer = topologyBuilder.setBolt(id, coordinatedBolt, parallel);
    if (bolt instanceof IPrepareCommit) {
        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.PREPARE_STREAM_ID);
    }
    if (bolt instanceof ICommitter) {
        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.COMMIT_STREAM_ID);
        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.REVERT_STREAM_ID);
    }
    if (bolt instanceof IPostCommit) {
        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.POST_STREAM_ID);
    }
    return boltDeclarer;
}
public BoltDeclarer setSpout(String id, IBatchSpout spout, int parallel) {
    BoltDeclarer boltDeclarer = this.setBolt(id, spout, parallel);
    boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.COMPUTING_STREAM_ID);
    return boltDeclarer;
}
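The builder above wires every control-stream subscription itself. A minimal usage sketch, assuming JStorm's BatchTopologyBuilder API; MyBatchSpout and MyCommitterBolt are hypothetical placeholder classes, not part of the source:

// Hypothetical wiring sketch: MyBatchSpout implements IBatchSpout and
// MyCommitterBolt implements IBasicBolt plus ICommitter, so setBolt() above
// subscribes it to the COMMIT and REVERT streams via allGrouping.
BatchTopologyBuilder batchBuilder = new BatchTopologyBuilder("batch-demo");
batchBuilder.setSpout("batch-spout", new MyBatchSpout(), 2);
batchBuilder.setBolt("committer-bolt", new MyCommitterBolt(), 4);
StormTopology topology = batchBuilder.getTopologyBuilder().createTopology();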
        specs), c.parallelism);
bd.allGrouping(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID);
bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.SUCCESS_STREAM_ID);
if (c.spout instanceof ICommitterTridentSpout) {
    bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.COMMIT_STREAM_ID);
}
// Committer bolts elsewhere in the same builder subscribe to the commit
// stream of every batch group they participate in:
d.allGrouping(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        null), _spoutParallelism)
        .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
        .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
if (_spout instanceof ICommitterTransactionalSpout) {
    emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
}
// Committer bolt inputs are likewise subscribed to the commit stream:
input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
public static void test() {
    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int split_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int count_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");

    int topN = 10;
    Time win = Time.seconds(10L);
    builder.setBolt("count", new WordCount(topN)
            .timeWindow(win)
            .withStateSize(Time.seconds(120L)), count_Parallelism_hint)
           .fieldsGrouping("split", new Fields("word"));
    builder.setBolt("merge", new MergeTopN(topN).timeWindow(win), 1).allGrouping("count");

    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    try {
        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
        .allGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, SequenceTopologyDef.CONTROL_STREAM_ID)
        .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
} else {
private static void declarebolt(TopologyBuilder builder, String boltName, IRichBolt bolt,
                                int parallelism, boolean control) {
    BoltDeclarer declarer = builder.setBolt(boltName, bolt, parallelism)
            .allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM)
            .allGrouping("tick", "tick")
            .localOrShuffleGrouping(INITIALIZER, boltName)
            .localOrShuffleGrouping(FILTER, boltName)
            // guaranteed partitions will always group the same flow for flows
            // that have joins with default partitions
            .fieldsGrouping(PARTITION, boltName, new Fields(FLOW_ID, PARTITION))
            .localOrShuffleGrouping(AGGREGATE, boltName)
            .localOrShuffleGrouping(SELECT, boltName)
            .localOrShuffleGrouping(EACH, boltName)
            .localOrShuffleGrouping(SORT, boltName)
            .localOrShuffleGrouping(SWITCH, boltName)
            .localOrShuffleGrouping(SPLIT, boltName)
            .localOrShuffleGrouping(JOIN, boltName);

    if (control) {
        // control stream is all-grouped
        declarer.allGrouping(INITIALIZER, BROADCAST_STREAM + boltName)
                .allGrouping(FILTER, BROADCAST_STREAM + boltName)
                .allGrouping(PARTITION, BROADCAST_STREAM + boltName)
                .allGrouping(AGGREGATE, BROADCAST_STREAM + boltName)
                .allGrouping(SELECT, BROADCAST_STREAM + boltName)
                .allGrouping(EACH, BROADCAST_STREAM + boltName)
                .allGrouping(SORT, BROADCAST_STREAM + boltName)
                .allGrouping(SWITCH, BROADCAST_STREAM + boltName)
                .allGrouping(SPLIT, BROADCAST_STREAM + boltName)
                .allGrouping(JOIN, BROADCAST_STREAM + boltName);
    }
}
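The BROADCAST_STREAM + boltName convention above implies each upstream component declares one broadcast stream per downstream bolt. A hedged sketch of the matching declaration; the declared field list is an assumption for illustration:

// Sketch, assuming the naming convention above: every upstream component
// declares one broadcast stream per downstream bolt so that control tuples
// reach every task of that bolt via allGrouping.
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    String[] downstream = {FILTER, PARTITION, AGGREGATE, SELECT, EACH,
                           SORT, SWITCH, SPLIT, JOIN};
    for (String boltName : downstream) {
        // the declared fields are an assumption, not the project's actual schema
        declarer.declareStream(BROADCAST_STREAM + boltName, new Fields(FLOW_ID, "event"));
    }
}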
public static void main(String[] args) throws InterruptedException {
    try {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word-reader", new WordReader());
        builder.setSpout("signals-spout", new SignalsSpout());
        builder.setBolt("word-normalizer", new WordNormalizer())
               .shuffleGrouping("word-reader");
        builder.setBolt("word-counter", new WordCounter(), 2)
               .shuffleGrouping("word-normalizer")
               .allGrouping("signals-spout", "signals");

        Config conf = new Config();
        conf.put("wordsFile", args[0]);
        conf.setDebug(true);
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Getting-Started-Toplogie", conf, builder.createTopology());
        Thread.sleep(4000);
        cluster.shutdown();
    } catch (Exception ioe) {
        System.out.println("################ Exception thrown ################");
        ioe.printStackTrace();
    }
}
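Because of the allGrouping("signals-spout", "signals") subscription above, every word-counter task receives every signal tuple while data tuples stay shuffle-grouped. A minimal self-contained sketch of how a bolt can separate the two streams; this is not the original WordCounter, and the "action" field name and "refreshCache" value are assumptions:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

public class SignalAwareCounter extends BaseRichBolt {
    private Map<String, Integer> counters;
    private OutputCollector collector;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.counters = new HashMap<String, Integer>();
        this.collector = collector;
    }

    @Override
    public void execute(Tuple input) {
        if ("signals".equals(input.getSourceStreamId())) {
            // Broadcast stream: every task of this bolt sees the signal.
            // "action"/"refreshCache" are illustrative assumptions.
            if ("refreshCache".equals(input.getStringByField("action"))) {
                counters.clear();
            }
        } else {
            // Data stream: words are load-balanced across tasks.
            String word = input.getStringByField("word");
            counters.merge(word, 1, Integer::sum);
        }
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: no output streams
    }
}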
@Override
public StormTopology buildTopology() {
    eventSpout.setFields(new Fields(Field.EVENT_ID, Field.ROUND_NUM));
    rewardSpout.setFields(new Fields(Field.ACTION_ID, Field.REWARD));

    builder.setSpout(Component.EVENT_SPOUT, eventSpout, eventSpoutThreads);
    builder.setSpout(Component.REWARD_SPOUT, rewardSpout, rewardSpoutThreads);
    builder.setBolt(Component.LEARNER, new ReinforcementLearnerBolt(), learnerThreads)
           .shuffleGrouping(Component.EVENT_SPOUT)
           .allGrouping(Component.REWARD_SPOUT);
    builder.setBolt(Component.SINK, actionSink, sinkThreads)
           .shuffleGrouping(Component.LEARNER);
    return builder.createTopology();
}
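Since the reward stream is all-grouped while events are shuffle-grouped, every learner task sees every reward update but only a share of the events. A hedged fragment of the dispatch this implies inside the learner bolt; updateReward and selectActions are hypothetical helper names, and Component.REWARD_SPOUT is assumed to be the String component id used above:

// Hedged sketch of execute() in a learner bolt wired as above.
@Override
public void execute(Tuple tuple) {
    if (Component.REWARD_SPOUT.equals(tuple.getSourceComponent())) {
        updateReward(tuple);   // broadcast: every task sees every reward
    } else {
        selectActions(tuple);  // shuffled: events are load-balanced
    }
}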
builder.setBolt(INITIALIZER, new FlowInitializerBolt(), parallelismHint) // kicks off a flow, determining where to start
       .localOrShuffleGrouping(EVENT)
       .allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM);
    break;
case ALL:
    declarer.allGrouping(stream.getFrom(), streamId);
    break;
case DIRECT:
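The case labels above come from a switch that maps a declarative grouping type onto BoltDeclarer calls. A self-contained sketch of that dispatch; the enum and class here are illustrative assumptions, not the original project's API:

import backtype.storm.topology.BoltDeclarer;

// Illustrative dispatch over a grouping enum like the one the fragment
// above switches over.
public class GroupingDispatch {
    enum GroupingKind { SHUFFLE, ALL, DIRECT }

    static void applyGrouping(BoltDeclarer declarer, GroupingKind kind,
                              String from, String streamId) {
        switch (kind) {
            case SHUFFLE:
                declarer.shuffleGrouping(from, streamId);
                break;
            case ALL:
                // broadcast: every task of the subscribing bolt gets each tuple
                declarer.allGrouping(from, streamId);
                break;
            case DIRECT:
                // receiving task is chosen by the emitter via emitDirect
                declarer.directGrouping(from, streamId);
                break;
        }
    }
}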
@Override
public StormTopology buildTopology() {
    trainingSpout.setFields(new Fields(Field.ID, Field.MESSAGE, Field.IS_SPAM));
    analysisSpout.setFields(new Fields(Field.ID, Field.MESSAGE));

    builder.setSpout(Component.TRAINING_SPOUT, trainingSpout, trainingSpoutThreads);
    builder.setSpout(Component.ANALYSIS_SPOUT, analysisSpout, analysisSpoutThreads);
    builder.setBolt(Component.TOKENIZER, new TokenizerBolt(), tokenizerThreads)
           .shuffleGrouping(Component.TRAINING_SPOUT)
           .shuffleGrouping(Component.ANALYSIS_SPOUT);
    builder.setBolt(Component.WORD_PROBABILITY, new WordProbabilityBolt(), wordProbThreads)
           .fieldsGrouping(Component.TOKENIZER, Stream.TRAINING, new Fields(Field.WORD))
           .fieldsGrouping(Component.TOKENIZER, Stream.ANALYSIS, new Fields(Field.WORD))
           .allGrouping(Component.TOKENIZER, Stream.TRAINING_SUM);
    builder.setBolt(Component.BAYES_RULE, new BayesRuleBolt(), bayesRuleThreads)
           .fieldsGrouping(Component.WORD_PROBABILITY, new Fields(Field.ID));
    builder.setBolt(Component.SINK, sink, sinkThreads)
           .shuffleGrouping(Component.BAYES_RULE);
    return builder.createTopology();
}
        .allGrouping(Component.GLOBAL_MEDIAN)
        .fieldsGrouping(Component.PLUG_MEDIAN, new Fields(Field.PLUG_SPECIFIC_KEY));
        .allGrouping(Component.VARIATION_DETECTOR);

        .allGrouping(Component.GLOBAL_ACD);