/**
 * Submits a topology to run on the cluster. A topology runs forever or until
 * explicitly killed.
 *
 * @param name      the name of the storm.
 * @param stormConf the topology-specific configuration. See {@link Config}.
 * @param topology  the processing to execute.
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopology(String name, Map stormConf, StormTopology topology)
        throws AlreadyAliveException, InvalidTopologyException {
    // Delegate to the full overload with no submit options.
    submitTopology(name, stormConf, topology, null);
}
/**
 * Submits a topology with a progress listener. The listener parameter is
 * accepted for API compatibility but not used.
 *
 * @param name      the name of the storm.
 * @param stormConf the topology-specific configuration. See {@link Config}.
 * @param topology  the processing to execute.
 * @param opts      options that manipulate the starting of the topology.
 * @param listener  ignored — progress reporting was removed in JStorm.
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopology(String name, Map stormConf, StormTopology topology,
                                  SubmitOptions opts, ProgressListener listener)
        throws AlreadyAliveException, InvalidTopologyException {
    // NOTE(review): 'listener' is intentionally unused; the overload exists only
    // so callers written against the upstream Storm API still compile.
    submitTopology(name, stormConf, topology, opts);
}
/**
 * Submits a topology to run on the cluster with a progress bar. A topology runs
 * forever or until explicitly killed.
 *
 * @param name      the name of the storm.
 * @param stormConf the topology-specific configuration. See {@link Config}.
 * @param topology  the processing to execute.
 * @param opts      options that manipulate the starting of the topology.
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopologyWithProgressBar(String name, Map stormConf,
                                                 StormTopology topology, SubmitOptions opts)
        throws AlreadyAliveException, InvalidTopologyException {
    // The progress bar was removed in JStorm, so this is a plain submit.
    submitTopology(name, stormConf, topology, opts);
}
/**
 * Submits a topology together with a set of extra jar files to be uploaded and
 * put on the topology's classpath. Jars that do not exist on disk are skipped
 * with a log message rather than failing the submit.
 *
 * @param name      the name of the storm.
 * @param stormConf the topology-specific configuration; the jar name/path maps
 *                  are stored into it under the GenericOptionsParser lib keys.
 * @param topology  the processing to execute.
 * @param opts      options that manipulate the starting of the topology.
 * @param jarFiles  extra jars to ship with the topology; may be null.
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopology(String name, Map stormConf, StormTopology topology,
                                  SubmitOptions opts, List<File> jarFiles)
        throws AlreadyAliveException, InvalidTopologyException {
    if (jarFiles == null) {
        jarFiles = new ArrayList<>();
    }
    Map<String, String> pathsByJarName = new HashMap<>(jarFiles.size());
    List<String> jarNames = new ArrayList<>(jarFiles.size());
    for (File jar : jarFiles) {
        // Skip (but record) jars that are missing on the local filesystem.
        if (!jar.exists()) {
            LOG.info(jar.getName() + " does not exist: " + jar.getAbsolutePath());
            continue;
        }
        pathsByJarName.put(jar.getName(), jar.getAbsolutePath());
        jarNames.add(jar.getName());
    }
    LOG.info("Files: " + jarNames + " will be loaded");
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_PATH, pathsByJarName);
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_NAME, jarNames);
    submitTopology(name, stormConf, topology, opts);
}
/**
 * Builds the sequence-test topology and submits it to a remote (distributed)
 * cluster. The topology name comes from {@link Config#TOPOLOGY_NAME}, falling
 * back to "SequenceTest" when unset.
 *
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws TopologyAssignException  if the topology cannot be assigned
 */
public static void SetRemoteTopology()
        throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        topologyName = "SequenceTest";
    }
    TopologyBuilder builder = new TopologyBuilder();
    SetBuilder(builder, conf);
    // Force distributed mode before submitting.
    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
/**
 * Submits the built topology either to an in-process LocalCluster (when the
 * configuration selects local mode) or to a remote cluster.
 *
 * <p>In local mode the cluster runs for ~200 seconds and is then shut down.
 * Any exception is printed and swallowed; callers see no failure signal.
 *
 * @param builder the fully-configured topology builder.
 */
private static void submitTopology(TopologyBuilder builder) {
    String topologyName = String.valueOf(conf.get("topology.name"));
    try {
        if (!local_mode(conf)) {
            StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
            return;
        }
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, builder.createTopology());
        // Let the local topology run for a fixed window before tearing it down.
        Thread.sleep(200000);
        cluster.shutdown();
    } catch (Exception e) {
        // NOTE(review): deliberately best-effort — errors are logged to stderr only.
        e.printStackTrace();
    }
}
public static void SetRemoteTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException { String streamName = (String) conf.get(Config.TOPOLOGY_NAME); if (streamName == null) { String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); streamName = className[className.length - 1]; } TopologyBuilder builder = new TopologyBuilder(); int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1); int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2); builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint); BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint); // localFirstGrouping is only for jstorm // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME); boltDeclarer.shuffleGrouping("spout"); // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60); conf.put(Config.STORM_CLUSTER_MODE, "distributed"); StormSubmitter.submitTopology(streamName, conf, builder.createTopology()); }
/**
 * Entry point: builds the TMUdf example topology (a user-defined topology-master
 * stream handler, 2 spouts, 4 bolts, 2 workers) and submits it to the cluster.
 *
 * @param args unused.
 * @throws AlreadyAliveException    if a topology named "TMUdfTopology" is already running
 * @throws InvalidTopologyException if the built topology is invalid
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    // Use the concrete Config type instead of a raw Map so the typed helper
    // methods remain available and the raw-type warning is avoided.
    Config config = new Config();
    config.put(ConfigExtension.TOPOLOGY_MASTER_USER_DEFINED_STREAM_CLASS,
            "com.alipay.dw.jstorm.example.tm.TMUdfHandler");
    config.put(Config.TOPOLOGY_WORKERS, 2);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("TMUdfSpout", new TMUdfSpout(), 2);
    builder.setBolt("TMUdfBolt", new TMUdfBolt(), 4);
    StormTopology topology = builder.createTopology();

    StormSubmitter.submitTopology("TMUdfTopology", config, topology);
} }
/**
 * Entry point: builds a transactional word-count topology backed by RocksDB
 * state with HDFS enabled, named after the simple class name, and submits it.
 *
 * @param args command-line arguments forwarded to {@code JStormHelper.getConfig}.
 * @throws Exception on any configuration or submission failure.
 */
public static void main(String[] args) throws Exception {
    Map conf = JStormHelper.getConfig(args);
    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);

    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism);
    builder.setBolt("split", new TxSplitSentence(), splitParallelism)
            .localOrShuffleGrouping("spout");
    builder.setBolt("count", new RocksDbCount(), countParallelism)
            .fieldsGrouping("split", new Fields("word"));
    builder.enableHdfs();

    // Name the topology after the simple name of this class.
    String[] classNameParts =
            Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = classNameParts[classNameParts.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
} }
/**
 * Builds the topology and submits it to a remote (distributed) cluster. The
 * messaging transport is chosen from the topology name: names containing
 * "zeromq" use the ZeroMQ context, everything else uses Netty.
 *
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws TopologyAssignException  if the topology cannot be assigned
 */
public void SetRemoteTopology()
        throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        topologyName = "SequenceTest";
    }
    // Transport selection is keyed off the topology name.
    String transport = topologyName.contains("zeromq")
            ? "com.alibaba.jstorm.message.zeroMq.MQContext"
            : "com.alibaba.jstorm.message.netty.NettyContext";
    conf.put(Config.STORM_MESSAGING_TRANSPORT, transport);

    StormSubmitter.submitTopology(topologyName, conf, topology);
}
// Submit this topology instance; the name is suffixed with the topology number,
// presumably to keep names unique when several copies are launched — confirm with caller.
StormSubmitter.submitTopology(_name + "_" + topoNum, conf, builder.createTopology());
/**
 * Submits a topology to a remote cluster, lets it run for the requested time
 * (minimum two minutes), invokes the optional callback, then kills it.
 * Topologies flagged with "RUN_LONG_TIME" are left running and never killed here.
 *
 * @param topology         the topology to submit.
 * @param topologyName     name under which to submit.
 * @param conf             topology configuration; worker count defaults to 3 when unset.
 * @param runtimeInSeconds how long to let the topology run; values below 120 are
 *                         raised to 120 seconds.
 * @param callback         optional hook executed after the run window, before the kill.
 * @throws Exception on submission or kill failure.
 */
public static void runTopologyRemotely(StormTopology topology, String topologyName, Config conf,
                                       int runtimeInSeconds, Callback callback) throws Exception {
    // Default to 3 workers unless the caller configured a count explicitly.
    if (conf.get(Config.TOPOLOGY_WORKERS) == null) {
        conf.setNumWorkers(3);
    }
    StormSubmitter.submitTopology(topologyName, conf, topology);

    // Long-running topologies are left alive; the caller handles cleanup.
    if (JStormUtils.parseBoolean(conf.get("RUN_LONG_TIME"), false)) {
        LOG.info(topologyName + " will run long time");
        return;
    }

    // Sleep for at least 120 seconds; multiply as long (1000L) so large
    // runtimeInSeconds values cannot overflow int arithmetic.
    JStormUtils.sleepMs(Math.max(runtimeInSeconds, 120) * 1000L);

    if (callback != null) {
        callback.execute(topologyName);
    }
    killTopology(conf, topologyName);
}
/**
 * Entry point: builds a transactional word-count topology with batch acking.
 * The spout implementation is selected by the "is.value.spout" config flag,
 * and the topology is named after the simple class name.
 *
 * @param args command-line arguments forwarded to {@code JStormHelper.getConfig}.
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Config conf = JStormHelper.getConfig(args);
    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);
    boolean isValueSpout = JStormUtils.parseBoolean(conf.get("is.value.spout"), false);

    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    // Choose the spout implementation based on configuration.
    if (isValueSpout) {
        builder.setSpoutWithAck("spout", new BatchAckerValueSpout(), spoutParallelism);
    } else {
        builder.setSpoutWithAck("spout", new BatchAckerSpout(), spoutParallelism);
    }
    builder.setBoltWithAck("split", new BatchAckerSplit(), splitParallelism)
            .localOrShuffleGrouping("spout");
    builder.setBoltWithAck("count", new BatchAckerCount(), countParallelism)
            .fieldsGrouping("split", new Fields("word"));

    // Name the topology after the simple name of this class.
    String[] classNameParts =
            Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = classNameParts[classNameParts.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
} }
// Remote branch: submit the pre-built remote topology under TOPOLOGY_NAME.
// NOTE(review): fragment — the enclosing if/else is not fully visible here.
StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology()); } else {
/**
 * Builds and submits a transactional word-count topology whose count bolt uses
 * a 60-second time window and doubles as its own transaction state operator.
 * In local mode a handful of transaction-related settings are forced into the
 * configuration first. HDFS support is enabled and the topology is named after
 * the simple class name.
 *
 * @throws Exception on any configuration or submission failure.
 */
public static void test() throws Exception {
    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    if (isLocal) {
        // Local-mode transaction settings.
        conf.put("tuple.num.per.batch", 100);
        conf.put("transaction.scheduler.spout", false);
        conf.put("transaction.exactly.cache.type", "default");
        conf.put("transaction.topology", true);
    }

    int spoutParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_SPLIT_PARALLELISM_HINT), 1);
    int countParallelism = JStormUtils.parseInt(conf.get(TOPOLOGY_COUNT_PARALLELISM_HINT), 1);

    builder.setSpout("spout", new TxFastRandomSentenceSpout(), spoutParallelism);
    builder.setBolt("split", new TxSplitSentence(), splitParallelism)
            .localOrShuffleGrouping("spout");
    // The WordCount bolt is windowed over 60s and acts as its own state operator.
    WordCount wordCount = new WordCount();
    builder.setBolt("count",
            wordCount.timeWindow(Time.seconds(60L)).withTransactionStateOperator(wordCount),
            countParallelism).fieldsGrouping("split", new Fields("word"));
    builder.enableHdfs();

    // Name the topology after the simple name of this class.
    String[] classNameParts =
            Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = classNameParts[classNameParts.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
public static void test() throws Exception { TransactionTopologyBuilder builder = new TransactionTopologyBuilder(); if (isLocal) { conf.put("tuple.num.per.batch", 5); conf.put("transaction.scheduler.spout", false); conf.put("transaction.exactly.cache.type", "default"); } int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1); int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2); int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2); boolean isScheduleSpout = JStormUtils.parseBoolean(conf.get("transaction.scheduler.spout"), true); if (isScheduleSpout) // Generate batch by configured time. "transaction.schedule.batch.delay.ms: 1000 # 1sec" builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism); else // Generate batch by user when calling emitBarrier builder.setSpout("spout", new BasicTxSpout(), spoutParallelism, false); builder.setBolt("split", new TxSplitSentence(), splitParallelism).localOrShuffleGrouping("spout"); builder.setBolt("count", new TxWordCount(), countParallelism).fieldsGrouping("split", new Fields("word")); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; StormSubmitter.submitTopology(topologyName, conf, builder.createTopology()); }
/**
 * Builds the configuration and topology from the supplied tool options, logs
 * the configuration, and submits the topology to the cluster.
 *
 * @param options source of the topology name, configuration, and structure.
 * @throws Exception on any configuration or submission failure.
 */
@Override
public void submitTopology(StormToolOptions options) throws Exception {
    final Config conf = options.prepareConfig();
    logger.info("\nStarting topology: \n" + conf);
    StormSubmitter.submitTopology(options.topologyName(), conf, options.constructTopology());
}
/**
 * Prepares the configuration from the tool options, logs it, and submits the
 * topology constructed by the options under the options-supplied name.
 *
 * @param options source of the topology name, configuration, and structure.
 * @throws Exception on any configuration or submission failure.
 */
@Override
public void submitTopology(StormToolOptions options) throws Exception {
    final Config conf = options.prepareConfig();
    logger.info("\nStarting topology: \n" + conf);
    StormSubmitter.submitTopology(options.topologyName(), conf, options.constructTopology());
}
public static void main(String[] args) throws Exception { Config conf = new Config(); if (args.length == 2) { // Ready & submit the topology String name = args[0]; BrokerHosts hosts = new ZkHosts(args[1]); TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts); StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout)); }else{ System.err.println("<topologyName> <zookeeperHost>"); } }
/**
 * Starts the topology registered under the given name. The topology definition
 * is looked up in the DAO; when absent, a fresh definition carrying only the
 * name is used instead.
 *
 * @param topologyName name of the topology to look up and submit under.
 * @throws Exception on any lookup, configuration, or submission failure.
 */
public void startTopology(String topologyName) throws Exception {
    Topology topologyDef = TopologyMgmtResourceHelper
            .findById(dao.listTopologies(), topologyName)
            .orElseGet(() -> {
                // No stored definition — fall back to a name-only placeholder.
                Topology fresh = new Topology();
                fresh.setName(topologyName);
                return fresh;
            });
    StormSubmitter.submitTopology(topologyName,
            getStormConf(null, topologyDef.getClusterName()),
            createTopology(topologyDef));
}