/**
 * Returns a stream consisting of the elements of this stream that match the given filter.
 *
 * @param inputFields the fields of the input trident tuple to be selected.
 * @param filter the filter to apply to each trident tuple to determine if it should be included.
 * @return the new stream
 */
public Stream filter(Fields inputFields, Filter filter) {
    return each(inputFields, filter);
}
public Stream each(Function function, Fields functionFields) {
    return each(null, function, functionFields);
}
@Override
public IAggregatableStream each(Fields inputFields, Function function, Fields functionFields) {
    Stream s = _stream.each(inputFields, function, functionFields);
    return new GroupedStream(s, _groupFields);
}
/**
 * Returns a stream consisting of the elements of this stream that match the given filter.
 *
 * @param filter the filter to apply to each trident tuple to determine if it should be included.
 * @return the new stream
 */
public Stream filter(Filter filter) {
    return each(getOutputFields(), filter);
}
public Stream each(Fields inputFields, Filter filter) {
    return each(inputFields, new FilterExecutor(filter), new Fields());
}
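// A minimal usage sketch for the filter overloads above. KeepHighCounts is a
// hypothetical filter, not part of the API: a Filter keeps a tuple exactly when
// isKeep(...) returns true, and filtering never changes the stream's output fields.
import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.tuple.TridentTuple;

class KeepHighCounts extends BaseFilter {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        // keep only tuples whose "count" value exceeds 10
        return tuple.getLongByField("count") > 10;
    }
}

// stream.filter(new Fields("count"), new KeepHighCounts());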
/**
 * Creates a topology with device-id and count (both whole numbers) as tuple
 * fields in a stream, and generates the resulting streams based on the min
 * and max of the device-id and count values.
 */
public static StormTopology buildDevicesTopology() {
    String deviceID = "device-id";
    String count = "count";
    Fields allFields = new Fields(deviceID, count);

    RandomNumberGeneratorSpout spout = new RandomNumberGeneratorSpout(allFields, 10, 1000);

    TridentTopology topology = new TridentTopology();
    Stream devicesStream = topology.newStream("devicegen-spout", spout)
            .each(allFields, new Debug("##### devices"));

    devicesStream.minBy(deviceID).each(allFields, new Debug("#### device with min id"));
    devicesStream.maxBy(count).each(allFields, new Debug("#### device with max count"));

    return topology.build();
}
Stream vehiclesStream = topology.newStream("spout1", spout)
        .each(allFields, new Debug("##### vehicles"));

Stream slowVehiclesStream = vehiclesStream.min(new SpeedComparator())
        .each(vehicleField, new Debug("#### slowest vehicle"));

Stream slowDriversStream = slowVehiclesStream.project(driverField)
        .each(driverField, new Debug("##### slowest driver"));

vehiclesStream.max(new SpeedComparator())
        .each(vehicleField, new Debug("#### fastest vehicle"))
        .project(driverField)
        .each(driverField, new Debug("##### fastest driver"));

vehiclesStream.max(new EfficiencyComparator())
        .each(vehicleField, new Debug("#### efficient vehicle"));
Stream vehiclesStream = topology.newStream("spout1", spout)
        .each(allFields, new Debug("##### vehicles"));

Stream slowVehiclesStream = vehiclesStream.min(new SpeedComparator())
        .each(vehicleField, new Debug("#### slowest vehicle"));

Stream slowDriversStream = slowVehiclesStream.project(driverField)
        .each(driverField, new Debug("##### slowest driver"));

vehiclesStream.max(new SpeedComparator())
        .each(vehicleField, new Debug("#### fastest vehicle"))
        .project(driverField)
        .each(driverField, new Debug("##### fastest driver"));

vehiclesStream.minBy(Vehicle.FIELD_NAME, new EfficiencyComparator())
        .each(vehicleField, new Debug("#### least efficient vehicle"));
vehiclesStream.maxBy(Vehicle.FIELD_NAME, new EfficiencyComparator())
        .each(vehicleField, new Debug("#### most efficient vehicle"));
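// A minimal sketch of a comparator usable with the min()/max() calls above,
// assuming a Vehicle bean exposing an int speed field (the bean's shape is an
// assumption, not taken from the snippet). min()/max() order whole
// TridentTuples, while minBy()/maxBy() take a comparator over the named
// field's values; the min/max is computed per partition of each batch.
import java.io.Serializable;
import java.util.Comparator;
import org.apache.storm.trident.tuple.TridentTuple;

class SpeedComparator implements Comparator<TridentTuple>, Serializable {
    @Override
    public int compare(TridentTuple t1, TridentTuple t2) {
        Vehicle v1 = (Vehicle) t1.getValueByField(Vehicle.FIELD_NAME);
        Vehicle v2 = (Vehicle) t2.getValueByField(Vehicle.FIELD_NAME);
        return Integer.compare(v1.speed, v2.speed);
    }
}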
/**
 * ## Repartitioning Operation
 *
 * This method takes in a custom partitioning function that implements
 * {@link org.apache.storm.grouping.CustomStreamGrouping}.
 *
 * @param grouping the grouping used to repartition the stream
 * @return the repartitioned stream
 */
public Stream partition(Grouping grouping) {
    if (_node instanceof PartitionNode) {
        // Two partition nodes cannot be stacked directly; insert a no-op
        // each() so the grouping applies to a fresh processing node.
        return each(new Fields(), new TrueFilter()).partition(grouping);
    } else {
        return _topology.addSourcedNode(this, new PartitionNode(_node.streamId, _name, getOutputFields(), grouping));
    }
}
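// A minimal sketch of a CustomStreamGrouping that could drive partition(...),
// assuming the Stream's partition(CustomStreamGrouping) convenience overload,
// which wraps the partitioner into a Grouping. HashFirstFieldGrouping is a
// hypothetical example: it routes each tuple to the task selected by the hash
// of the tuple's first value.
import java.util.Collections;
import java.util.List;
import org.apache.storm.generated.GlobalStreamId;
import org.apache.storm.grouping.CustomStreamGrouping;
import org.apache.storm.task.WorkerTopologyContext;

class HashFirstFieldGrouping implements CustomStreamGrouping {
    private List<Integer> targetTasks;

    @Override
    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
        // remember the downstream tasks this grouping can route to
        this.targetTasks = targetTasks;
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        // deterministic routing: same first value always lands on the same task
        int idx = Math.floorMod(values.get(0).hashCode(), targetTasks.size());
        return Collections.singletonList(targetTasks.get(idx));
    }
}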
public static StormTopology buildTopology(LocalDRPC drpc) {
    TridentTopology topology = new TridentTopology();
    TridentState urlToTweeters = topology.newStaticState(new StaticSingleKeyMapState.Factory(TWEETERS_DB));
    TridentState tweetersToFollowers = topology.newStaticState(new StaticSingleKeyMapState.Factory(FOLLOWERS_DB));

    topology.newDRPCStream("reach", drpc)
            .stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields("tweeters"))
            .each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter"))
            .shuffle()
            .stateQuery(tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers"))
            .each(new Fields("followers"), new ExpandList(), new Fields("follower"))
            .groupBy(new Fields("follower"))
            .aggregate(new One(), new Fields("one"))
            .aggregate(new Fields("one"), new Sum(), new Fields("reach"));
    return topology.build();
}
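// The reach pipeline above relies on two helpers whose implementations are not
// shown in this section; these are plausible sketches. ExpandList emits one
// tuple per element of a list-valued field, and One is a combiner that yields
// 1 per group, so summing "one" counts distinct followers.
import java.util.List;
import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.CombinerAggregator;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

class ExpandList extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        List<?> list = (List<?>) tuple.getValue(0);
        if (list != null) {
            for (Object item : list) {
                collector.emit(new Values(item));
            }
        }
    }
}

class One implements CombinerAggregator<Integer> {
    @Override
    public Integer init(TridentTuple tuple) {
        return 1;
    }

    @Override
    public Integer combine(Integer val1, Integer val2) {
        return 1;
    }

    @Override
    public Integer zero() {
        return 1;
    }
}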
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(16);

    topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
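// One plausible implementation of the Split function used above: a
// BaseFunction that emits one "word" tuple per whitespace-separated token of
// the input sentence.
import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

class Split extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        for (String word : tuple.getString(0).split(" ")) {
            if (!word.isEmpty()) {
                collector.emit(new Values(word));
            }
        }
    }
}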
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple input) {
                    LOG.info("Received tuple: [{}]", input);
                }
            });

    return topology.build();
}
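// A usage sketch for buildTopology(...) above, assuming an in-memory windows
// store and a tumbling count window of 100 tuples; both choices are
// illustrative, not mandated by the method.
import org.apache.storm.generated.StormTopology;
import org.apache.storm.trident.windowing.InMemoryWindowsStoreFactory;
import org.apache.storm.trident.windowing.config.TumblingCountWindow;

StormTopology topology = buildTopology(new InMemoryWindowsStoreFactory(), TumblingCountWindow.of(100));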
.each(new Fields("sentence"), new Split(), new Fields("word")).parallelismHint(split_Parallelism_hint).groupBy(new Fields("word")) .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")) .parallelismHint(count_Parallelism_hint);
public static StormTopology buildTopology(LocalDRPC drpc) throws IOException {
    FakeTweetsBatchSpout spout = new FakeTweetsBatchSpout();

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout)
            .each(new Fields("id", "text", "actor", "location", "date"), new Utils.PrintFilter());

    return topology.build();
}
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();
    topology.newStream("tweets", spout)
            .each(new Fields("str"), new Print());
    topology.newDRPCStream("ping");
    return topology.build();
}
public static StormTopology buildTopology(LocalDRPC drpc) throws IOException {
    FakeTweetsBatchSpout spout = new FakeTweetsBatchSpout(100);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout)
            .aggregate(new Fields("location"), new LocationAggregator(), new Fields("location_counts"))
            .each(new Fields("location_counts"), new Utils.PrintFilter());

    return topology.build();
}
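// A minimal sketch of what LocationAggregator might look like, assuming it
// counts tuples per location into a map and emits that map once per batch;
// the real class is not shown in this section, so this shape is an assumption.
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.trident.operation.BaseAggregator;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

class LocationAggregator extends BaseAggregator<Map<String, Integer>> {
    @Override
    public Map<String, Integer> init(Object batchId, TridentCollector collector) {
        // fresh per-batch state
        return new HashMap<>();
    }

    @Override
    public void aggregate(Map<String, Integer> counts, TridentTuple tuple, TridentCollector collector) {
        // bump the count for this tuple's location
        counts.merge(tuple.getStringByField("location"), 1, Integer::sum);
    }

    @Override
    public void complete(Map<String, Integer> counts, TridentCollector collector) {
        // emit the per-location counts once the batch is done
        collector.emit(new Values(counts));
    }
}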
public static StormTopology buildTopology(LocalDRPC drpc) throws IOException {
    FakeTweetsBatchSpout spout = new FakeTweetsBatchSpout(100);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout)
            .groupBy(new Fields("location"))
            .aggregate(new Fields("location"), new Count(), new Fields("count"))
            .each(new Fields("location", "count"), new Utils.PrintFilter());

    return topology.build();
}
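// Note: unlike the previous snippet, which runs a single aggregation over the
// whole batch, grouping by "location" first means Count executes once per
// location group, emitting one (location, count) pair per group rather than a
// single aggregated value.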