public static ValuePointer[] buildIndex(Fields fieldsOrder, Map<String, ValuePointer> pointers) {
    if (fieldsOrder.size() != pointers.size()) {
        throw new IllegalArgumentException("Fields order must be same length as pointers map");
    }
    ValuePointer[] ret = new ValuePointer[pointers.size()];
    for (int i = 0; i < fieldsOrder.size(); i++) {
        ret[i] = pointers.get(fieldsOrder.get(i));
    }
    return ret;
}
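// Hedged usage sketch, not from the original source: assumes ValuePointer exposes a
// (delegateIndex, index, field) constructor, as in Trident's storm.trident.tuple package.
// The returned array lines pointers up in declared-field order, so downstream code can
// resolve values by position instead of by repeated map lookups.
Map<String, ValuePointer> pointers = new HashMap<>();
pointers.put("word", new ValuePointer(0, 0, "word"));
pointers.put("count", new ValuePointer(0, 1, "count"));
ValuePointer[] indexed = buildIndex(new Fields("word", "count"), pointers);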
public MapCombinerAggStateUpdater(CombinerAggregator agg, Fields groupFields, Fields inputFields) {
    _agg = agg;
    _groupFields = groupFields;
    _inputFields = inputFields;
    if (inputFields.size() != 1) {
        throw new IllegalArgumentException("Combiner aggs only take a single field as input. Got this instead: " + inputFields.toString());
    }
    _factory = new ComboList.Factory(groupFields.size(), inputFields.size());
}
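// For reference, the kind of CombinerAggregator this updater wraps: Trident's
// combiner contract is init/combine/zero. A minimal count, mirroring the
// built-in storm.trident.operation.builtin.Count:
public static class Count implements CombinerAggregator<Long> {
    @Override
    public Long init(TridentTuple tuple) {
        return 1L; // each input tuple contributes 1
    }

    @Override
    public Long combine(Long val1, Long val2) {
        return val1 + val2; // partial counts add up
    }

    @Override
    public Long zero() {
        return 0L; // identity value for empty partitions
    }
}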
public static void test() {
    TopologyBuilder builder = new TopologyBuilder();

    int spoutNum = JStormUtils.parseInt(conf.get("spout.num"), 8);
    int countNum = JStormUtils.parseInt(conf.get("count.num"), 8);

    builder.setSpout("spout", new InOrderSpout(), spoutNum);
    builder.setBolt("count", new Check(), countNum).fieldsGrouping("spout", new Fields("c1"));

    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    try {
        JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60,
                new JStormHelper.CheckAckedFail(conf), isLocal);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Failed");
    }
}
public static void test() { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new InOrderSpout(), 8); builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1")); Config conf = new Config(); conf.setMaxSpoutPending(20); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; if (isLocal) { drpc = new LocalDRPC(); } try { JStormHelper.runTopology(buildTopology(drpc), topologyName, conf, 60, new DrpcValidator(), isLocal); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); Assert.fail("Failed"); } }
public static void test() { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; try { JStormHelper.runTopology(builder.createTopology(), topologyName, conf, 60, new JStormHelper.CheckAckedFail(conf), isLocal); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); Assert.fail("Failed"); } }
public StormTopology buildTopology() {
    Config conf = getConf();
    TopologyBuilder builder = new TopologyBuilder();

    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

    builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, new SequenceSpout(), spout_Parallelism_hint);

    boolean isEnableSplit = JStormUtils.parseBoolean(conf.get("enable.split"), false);
    // The if-branch was lost when this snippet was flattened; restored from the
    // structure of the else-branch: without splitting, TotalCount consumes the
    // spout directly.
    if (!isEnableSplit) {
        builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
                .localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    } else {
        builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME, new SplitRecord(), bolt_Parallelism_hint)
                .localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
        builder.setBolt(SequenceTopologyDef.TRADE_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.TRADE_STREAM_ID);
        builder.setBolt(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new PairCount(), bolt_Parallelism_hint)
                .shuffleGrouping(SequenceTopologyDef.SPLIT_BOLT_NAME, SequenceTopologyDef.CUSTOMER_STREAM_ID);
        builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME, new MergeRecord(), bolt_Parallelism_hint)
                .fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME, new Fields("ID"))
                .fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new Fields("ID"));
        builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
                // The grouping was truncated in the original; subscribing to the
                // merge bolt via noneGrouping is an assumption.
                .noneGrouping(SequenceTopologyDef.MERGE_BOLT_NAME);
    }

    return builder.createTopology();
}
// Excerpt from a linear-DRPC style createTopology(); SPOUT_ID, spout, conf,
// _components, declarer, idSpec, boltId(), outputStream and fields are defined
// by the enclosing builder class. Brace placement was lost in flattening and is
// restored here following the usual LinearDRPCTopologyBuilder layout.
final String PREPARE_ID = "prepare-request";

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout);
builder.setBolt(PREPARE_ID, new PrepareRequest()).noneGrouping(SPOUT_ID);

int i = 0;
for (; i < _components.size(); i++) {
    // Each component's declarer picks up its per-component configuration and
    // subscribes to the id stream so a request can be tracked end to end.
    declarer.addConfigurations(conf);
    declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(),
            PrepareRequest.ID_STREAM, new Fields("request"));
}

// After the loop, join the last component's output with the original request,
// then hand the joined result back to the DRPC return stream.
builder.setBolt("JoinResult", new JoinResult(PREPARE_ID))
        .fieldsGrouping(boltId(i - 1), outputStream, new Fields(fields.get(0)))
        .fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
i++;
builder.setBolt("ReturnResults", new ReturnResults()).noneGrouping("JoinResult");
public StormTopology buildTopology(String topic) {
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, topic, "", "xlog_" + topic);
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig), 2).setNumTasks(8);
    builder.setBolt("SplitBolt", new SplitSentence(), 1).setNumTasks(2).shuffleGrouping("KafkaSpout");
    builder.setBolt("XlogBolt", new XlogBolt(), 4).setNumTasks(8).fieldsGrouping("SplitBolt", new Fields("ip"));
    return builder.createTopology();
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declareStream("multi", new Fields("id", "word"));
    declarer.declareStream("single", new Fields("id", "word"));
}
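// A bolt declaring two streams emits by stream id; a hedged sketch of the
// matching execute() — the routing condition here is hypothetical, only to
// show per-stream emits:
@Override
public void execute(Tuple tuple, BasicOutputCollector collector) {
    Object id = tuple.getValue(0);
    String word = tuple.getString(1);
    if (word.contains(" ")) {
        collector.emit("multi", new Values(id, word));   // multi-word input
    } else {
        collector.emit("single", new Values(id, word));  // single-word input
    }
}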
// Fragment of the sequence-topology setup; spout_Parallelism_hint and the
// trade/customer bolts are wired earlier, as in buildTopology() above.
int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

builder.setSpout(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, new SequenceSpout(), spout_Parallelism_hint);

// The condition was lost in flattening; restored as the mirrored enable.split
// check used elsewhere in this topology.
if (!isEnableSplit) {
    BoltDeclarer boltDeclarer = builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint);
    boltDeclarer.shuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME)
            .allGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME, SequenceTopologyDef.CONTROL_STREAM_ID)
            .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
} else {
    builder.setBolt(SequenceTopologyDef.SPLIT_BOLT_NAME, new SplitRecord(), bolt_Parallelism_hint)
            .localOrShuffleGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    builder.setBolt(SequenceTopologyDef.MERGE_BOLT_NAME, new MergeRecord(), bolt_Parallelism_hint)
            .fieldsGrouping(SequenceTopologyDef.TRADE_BOLT_NAME, new Fields("ID"))
            .fieldsGrouping(SequenceTopologyDef.CUSTOMER_BOLT_NAME, new Fields("ID"));
    builder.setBolt(SequenceTopologyDef.TOTAL_BOLT_NAME, new TotalCount(), bolt_Parallelism_hint)
            // The grouping was truncated in the original; subscribing to the
            // merge bolt via noneGrouping is an assumption.
            .noneGrouping(SequenceTopologyDef.MERGE_BOLT_NAME);
}

boolean useJavaSer = JStormUtils.parseBoolean(conf.get("fall.back.on.java.serialization"), true);
Config.setFallBackOnJavaSerialization(conf, useJavaSer);

Config.registerSerialization(conf, TradeCustomer.class, TradeCustomerSerializer.class);
Config.registerSerialization(conf, Pair.class, PairSerializer.class);
private void wireTopology() throws InterruptedException {
    String spoutId = "wordGenerator";
    String counterId = "counter";
    String intermediateRankerId = "intermediateRanker";
    String totalRankerId = "finalRanker";

    builder.setSpout(spoutId, new TestWordSpout(), 5);
    // RollingCountBolt(9, 3): 9-second sliding window, emitting counts every 3 seconds.
    builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).fieldsGrouping(spoutId, new Fields("word"));
    builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(counterId, new Fields("obj"));
    builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
}
public static void test() throws Exception {
    TransactionTopologyBuilder builder = new TransactionTopologyBuilder();
    if (isLocal) {
        conf.put("tuple.num.per.batch", 5);
        conf.put("transaction.scheduler.spout", false);
        conf.put("transaction.exactly.cache.type", "default");
    }

    int spoutParallelism = JStormUtils.parseInt(conf.get(SPOUT_PARALLELISM_HINT), 1);
    int splitParallelism = JStormUtils.parseInt(conf.get(SPLIT_PARALLELISM_HINT), 2);
    int countParallelism = JStormUtils.parseInt(conf.get(COUNT_PARALLELISM_HINT), 2);

    boolean isScheduleSpout = JStormUtils.parseBoolean(conf.get("transaction.scheduler.spout"), true);
    if (isScheduleSpout) {
        // Generate a batch on a configured schedule: "transaction.schedule.batch.delay.ms: 1000 # 1 sec"
        builder.setSpout("spout", new ScheduleTxSpout(), spoutParallelism);
    } else {
        // Generate a batch whenever the user calls emitBarrier
        builder.setSpout("spout", new BasicTxSpout(), spoutParallelism, false);
    }

    builder.setBolt("split", new TxSplitSentence(), splitParallelism).localOrShuffleGrouping("spout");
    builder.setBolt("count", new TxWordCount(), countParallelism).fieldsGrouping("split", new Fields("word"));

    String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
    String topologyName = className[className.length - 1];
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
public static void connectNewBolt(TopologyBuilder builder) {
    final JoinBolt b = new JoinBolt();
    builder.setBolt("joinBolt", b, 2)
            .fieldsGrouping("randomSpout1", new Fields("field_0"))
            .fieldsGrouping("randomSpout2", new Fields("field_1"));
}
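// For the join above to see matching keys on the same task, each upstream spout
// must declare the field it is grouped on. A hypothetical declaration for
// randomSpout1 (randomSpout2 would declare "field_1" the same way); "payload"
// is an assumed extra field:
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("field_0", "payload"));
}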
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("ID", "RECORD"));
    declarer.declareStream(SequenceTopologyDef.CONTROL_STREAM_ID, new Fields("CONTROL"));
    // declarer.declare(new Fields("ID"));
}
/**
 * Persist the source data stream as a metric.
 */
public BuilderContext saveAsMetric(MetricDescriptor metricDescriptor) {
    String metricDataID = generateId("MetricDataSink");
    String metricSchemaID = generateId("MetricSchemaGenerator");
    topologyBuilder.setBolt(metricDataID, new MetricStreamPersist(metricDescriptor, appConfig))
            .shuffleGrouping(getId());
    topologyBuilder.setBolt(metricSchemaID, new MetricSchemaGenerator(metricDescriptor, appConfig))
            .fieldsGrouping(metricDataID, new Fields(MetricStreamPersist.METRIC_NAME_FIELD));
    return this;
}
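// Hedged usage sketch: because saveAsMetric returns this BuilderContext,
// several metric sinks can be chained off one stream. "builderContext",
// "cpuMetric" and "memMetric" are hypothetical names for illustration only:
builderContext.saveAsMetric(cpuMetric)
              .saveAsMetric(memMetric);
// Note the schema generator is fieldsGrouped on the metric name, so each
// metric's schema is produced by a single task rather than duplicated.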
public static void test() { MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH); TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 1); builder.setBolt("partial-count", new BatchCount(), 2).noneGrouping("spout"); builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count"); conf.setDebug(true); conf.setMaxSpoutPending(3); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; try { JStormHelper.runTopology(builder.buildTopology(), topologyName, conf, 60, new JStormHelper.CheckAckedFail(conf), isLocal); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); Assert.fail("Failed"); } }
public static void test() { MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH); TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2); builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word")); builder.setBolt("bucketize", new Bucketize()).noneGrouping("count"); builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket")); String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); String topologyName = className[className.length - 1]; try { JStormHelper.runTopology(builder.buildTopology(), topologyName, conf, 60, new JStormHelper.CheckAckedFail(conf), isLocal); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); Assert.fail("Failed"); } }
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    List<String> fields = new ArrayList<>(_spout.getOutputFields().toList());
    fields.add(0, ID_FIELD);
    declarer.declareStream(_streamName, new Fields(fields));
}
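// The matching emit side (hedged sketch): the wrapper prepends an id so the
// values line up with ID_FIELD at position 0 of the declared stream.
// "_collector" and "nextId()" are assumptions about the enclosing class:
private void emitWithId(List<Object> delegateValues) {
    List<Object> values = new ArrayList<>(delegateValues.size() + 1);
    values.add(nextId());            // hypothetical id generator
    values.addAll(delegateValues);   // the wrapped spout's original values
    _collector.emit(_streamName, values);
}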