public static void main(String[] args) throws Exception {
    // Use the typed Config (as the other entry points in this file do) instead of a raw Map.
    Config conf = JStormHelper.getConfig(args);

    // Parallelism hints come from the submitted config, with small defaults
    // suitable for a demo run (1 spout, 2 splitters, 2 counters).
    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);

    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism);
    builder.setBolt("split", new TxSplitSentence(), splitParallelism).localOrShuffleGrouping("spout");
    builder.setBolt("count", new RocksDbCount(), countParallelism).fieldsGrouping("split", new Fields("word"));
    builder.enableHdfs();

    // Topology name is derived from the simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
}
// Wire two CheckBolt instances (parallelism 3) to the same spout; they differ
// only in the boolean constructor flag — presumably a pass/fail checking mode,
// TODO confirm against CheckBolt's constructor.
builder.setBolt(BOLT1_NAME, new CheckBolt(false), 3).localOrShuffleGrouping(SPOUT_NAME); builder.setBolt(BOLT2_NAME, new CheckBolt(true), 3).localOrShuffleGrouping(SPOUT_NAME);
public static void test() {
    // Parallelism hints come from the topology config, with defaults for a
    // small word-count run (1 spout, 1 splitter, 2 counters).
    // Locals renamed to lowerCamelCase per Java convention.
    int spoutParallelismHint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int splitParallelismHint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int countParallelismHint = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 2);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), spoutParallelismHint);
    builder.setBolt("split", new SplitSentence(), splitParallelismHint).localOrShuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), countParallelismHint).fieldsGrouping("split", new Fields("word"));

    // Topology name is derived from the simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];

    isLocal = JStormHelper.localMode(conf);
    try {
        // Run for 60s and verify via CheckAckedFail that no tuples failed.
        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), isLocal);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Config conf = JStormHelper.getConfig(args);
    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);
    boolean isValueSpout = JStormUtils.parseBoolean(conf.get("is.value.spout"), false);

    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    // The "value" spout variant is opt-in via the "is.value.spout" config key;
    // both variants register with batch acking. (Braces added; a stray empty
    // statement ";;" after the split declaration was removed.)
    if (isValueSpout) {
        builder.setSpoutWithAck("spout", new BatchAckerValueSpout(), spoutParallelism);
    } else {
        builder.setSpoutWithAck("spout", new BatchAckerSpout(), spoutParallelism);
    }
    builder.setBoltWithAck("split", new BatchAckerSplit(), splitParallelism).localOrShuffleGrouping("spout");
    builder.setBoltWithAck("count", new BatchAckerCount(), countParallelism).fieldsGrouping("split", new Fields("word"));

    // Topology name is derived from the simple name of the calling class.
    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
}
public static void test() throws Exception { TransactionTopologyBuilder builder = new TransactionTopologyBuilder(); if (isLocal) { conf.put("tuple.num.per.batch", 5); conf.put("transaction.scheduler.spout", false); conf.put("transaction.exactly.cache.type", "default"); } int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1); int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2); int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2); boolean isScheduleSpout = JStormUtils.parseBoolean(conf.get("transaction.scheduler.spout"), true); if (isScheduleSpout) // Generate batch by configured time. "transaction.schedule.batch.delay.ms: 1000 # 1sec" builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism); else // Generate batch by user when calling emitBarrier builder.setSpout("spout", new BasicTxSpout(), spoutParallelism, false); builder.setBolt("split", new TxSplitSentence(), splitParallelism).localOrShuffleGrouping("spout"); builder.setBolt("count", new TxWordCount(), countParallelism).fieldsGrouping("split", new Fields("word")); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; StormSubmitter.submitTopology(topologyName, conf, builder.createTopology()); }
public static void test() throws Exception {
    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();

    // Local-mode overrides: explicit 100-tuple batches, no scheduler spout,
    // default exactly-once cache, transactional topology enabled.
    if (isLocal) {
        conf.put("tuple.num.per.batch", 100);
        conf.put("transaction.scheduler.spout", false);
        conf.put("transaction.exactly.cache.type", "default");
        conf.put("transaction.topology", true);
    }

    int spoutNum = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int splitNum = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int countNum = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    builder.setSpout("spout", new TxFastRandomSentenceSpout(), spoutNum);
    builder.setBolt("split", new SplitSentence(), splitNum).localOrShuffleGrouping("spout");
    // Counting bolt: 1-second tumbling windows, state retained for 2 hours.
    builder.setBolt("count",
            new WordCount().timeWindow(Time.seconds(1L)).withStateSize(Time.hours(2)),
            countNum).fieldsGrouping("split", new Fields("word"));

    // Topology name is derived from the simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];

    // Run for 60s and verify via CheckAckedFail that no tuples failed.
    JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
            new JStormHelper.CheckAckedFail(conf), true);
}
public static void test() throws Exception {
    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();

    // Local-mode overrides: explicit 100-tuple batches, no scheduler spout,
    // default exactly-once cache, transactional topology enabled.
    if (isLocal) {
        conf.put("tuple.num.per.batch", 100);
        conf.put("transaction.scheduler.spout", false);
        conf.put("transaction.exactly.cache.type", "default");
        conf.put("transaction.topology", true);
    }

    int spoutNum = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int splitNum = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int countNum = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    builder.setSpout("spout", new TxFastRandomSentenceSpout(), spoutNum);
    builder.setBolt("split", new TxSplitSentence(), splitNum).localOrShuffleGrouping("spout");

    // Counting bolt: 60-second windows; the bolt itself also serves as the
    // transaction state operator.
    WordCount counter = new WordCount();
    builder.setBolt("count",
            counter.timeWindow(Time.seconds(60L)).withTransactionStateOperator(counter),
            countNum).fieldsGrouping("split", new Fields("word"));
    builder.enableHdfs();

    // Topology name is derived from the simple name of the calling class.
    String[] nameParts = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = nameParts[nameParts.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
// Continuation of a setBolt(...) chain (enclosing statement not visible here):
// subscribe to the sequence spout, preferring same-worker (local) delivery.
.localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
// Continuation of a setBolt(...) chain (enclosing statement not visible here):
// subscribe to the sequence spout, preferring same-worker (local) delivery.
.localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
void configureStreamGrouping(String prevComponent, Configuration boltConf, BoltDeclarer declarer) throws ConfigurationException {
    // Grouping type defaults to local-or-shuffle; stream id defaults to Storm's
    // default stream. StringUtils.equals is null-safe, so a missing value is fine.
    String groupingType = boltConf.getString(STREAM_GROUPING_CONF_TYPE, STREAM_GROUPING_LOCAL_OR_SHUFFLE);
    String streamId = boltConf.getString(STREAM_ID, Utils.DEFAULT_STREAM_ID);

    if (StringUtils.equals(groupingType, STREAM_GROUPING_FIELDS)) {
        // Fields grouping needs extra config (the grouping fields themselves).
        configureStreamFieldsGrouping(prevComponent, streamId, boltConf, declarer);
        return;
    }
    if (StringUtils.equals(groupingType, STREAM_GROUPING_LOCAL_OR_SHUFFLE)) {
        declarer.localOrShuffleGrouping(prevComponent, streamId);
        return;
    }
    // Any unrecognized grouping type falls back to plain shuffle grouping.
    declarer.shuffleGrouping(prevComponent, streamId);
}
private static void declarebolt(TopologyBuilder builder, String boltName, IRichBolt bolt, int parallelism, boolean control) { BoltDeclarer declarer = builder.setBolt(boltName, bolt, parallelism) .allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM) .allGrouping("tick", "tick") .localOrShuffleGrouping(INITIALIZER, boltName) .localOrShuffleGrouping(FILTER, boltName) .fieldsGrouping(PARTITION, boltName, new Fields(FLOW_ID, PARTITION)) // guaranteed partitions will always group the same flow for flows that have joins with default partitions. .localOrShuffleGrouping(AGGREGATE, boltName) .localOrShuffleGrouping(SELECT, boltName) .localOrShuffleGrouping(EACH, boltName) .localOrShuffleGrouping(SORT, boltName) .localOrShuffleGrouping(SWITCH, boltName) .localOrShuffleGrouping(SPLIT, boltName) .localOrShuffleGrouping(JOIN, boltName); if(control) { // control stream is all-grouped declarer.allGrouping(INITIALIZER, BROADCAST_STREAM + boltName) .allGrouping(FILTER, BROADCAST_STREAM + boltName) .allGrouping(PARTITION, BROADCAST_STREAM + boltName) .allGrouping(AGGREGATE, BROADCAST_STREAM + boltName) .allGrouping(SELECT, BROADCAST_STREAM + boltName) .allGrouping(EACH, BROADCAST_STREAM + boltName) .allGrouping(SORT, BROADCAST_STREAM + boltName) .allGrouping(SWITCH, BROADCAST_STREAM + boltName) .allGrouping(SPLIT, BROADCAST_STREAM + boltName) .allGrouping(JOIN, BROADCAST_STREAM + boltName); } }
// Continuation of a setBolt(...) chain (enclosing statement not visible here):
// consume the event stream locally when possible, and listen on the broadcast
// flow-loader stream.
.localOrShuffleGrouping(EVENT) .allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM);
// Fragment of a switch over a grouping-type enum (surrounding cases not visible):
// LOCAL_OR_SHUFFLE maps directly to BoltDeclarer.localOrShuffleGrouping.
break; case LOCAL_OR_SHUFFLE: declarer.localOrShuffleGrouping(stream.getFrom(), streamId); break; case NONE: