// Verifies the stats tick counter: it increments once per tick tuple and wraps
// back to zero once FILTER_BOLT_STATS_REPORT_TICKS (here 10) ticks have fired
// and a stats report is triggered.
@Test public void testStatisticsReporting() {
    config.set(BulletStormConfig.FILTER_BOLT_STATS_REPORT_TICKS, 10);
    config.validate();
    bolt = ComponentUtils.prepare(new HashMap<>(), new FilterBolt(TopologyConstants.RECORD_COMPONENT, config), collector);
    Tuple tick = TupleUtils.makeTuple(TupleClassifier.Type.TICK_TUPLE);
    for (int i = 0; i < 10; ++i) {
        // The counter reflects ticks seen so far, checked before each execute.
        Assert.assertEquals(bolt.getStatsTickCount(), i);
        bolt.execute(tick);
    }
    // After the 10th tick, the report fires and the counter resets.
    Assert.assertEquals(bolt.getStatsTickCount(), 0);
}
}
/**
 * Creates a spied Querier whose {@code isDone()} answers false for the first
 * {@code doneAfter} invocations and true from then on.
 */
@Override
protected Querier createQuerier(Querier.Mode mode, String id, String query, BulletConfig config) {
    Querier spied = spy(super.createQuerier(mode, id, query, config));
    // doneAfter falses followed by a single true; Mockito repeats the final answer.
    List<Boolean> answers = new ArrayList<>();
    for (int i = 0; i < doneAfter; ++i) {
        answers.add(false);
    }
    answers.add(true);
    doAnswer(returnsElementsOf(answers)).when(spied).isDone();
    return spied;
}
}
// Emits the query's rate limit error on the error stream, keyed by the query ID.
private void emitError(Map.Entry<String, Querier> entry) {
    Querier querier = entry.getValue();
    emit(ERROR_STREAM, entry.getKey(), querier.getRateLimitError());
}
// Tick handler: re-categorizes all managed queries (time-driven, no record)
// and performs the periodic stats bookkeeping.
private void onTick() {
    // Categorize queries in partition mode.
    handleCategorizedQueries(manager.categorize());
    handleStats();
}
/**
 * Classifies the incoming tuple and dispatches it to the matching handler.
 * Every known tuple type is acked after handling; unknown tuples are logged
 * and deliberately NOT acked.
 */
@Override
public void execute(Tuple tuple) {
    // Check if the tuple is any known type, otherwise make it unknown
    TupleClassifier.Type type = classifier.classify(tuple).orElse(TupleClassifier.Type.UNKNOWN_TUPLE);
    switch (type) {
        case TICK_TUPLE:
            onTick();
            break;
        case METADATA_TUPLE:
            onMeta(tuple);
            break;
        case QUERY_TUPLE:
            onQuery(tuple);
            break;
        case RECORD_TUPLE:
            onRecord(tuple);
            // Record tuples also feed the built-in latency metric.
            updateLatency(tuple);
            break;
        default:
            // May want to throw an error here instead of not acking
            log.error("Unknown tuple encountered: {}", type);
            return;
    }
    collector.ack(tuple);
}
// A tuple this bolt cannot classify (RESULT_TUPLE is not one of its inputs)
// must be left un-acked.
@Test
public void testUnknownTuple() {
    Tuple unknown = TupleUtils.makeTuple(TupleClassifier.Type.RESULT_TUPLE, "", "");
    bolt.execute(unknown);
    Assert.assertFalse(collector.wasAcked(unknown));
}
@Test public void testFilteringLatency() { config = new BulletStormConfig(); // Don't use the overridden aggregation default size but turn on built in metrics config.set(BulletStormConfig.TOPOLOGY_METRICS_BUILT_IN_ENABLE, true); collector = new CustomCollector(); CustomTopologyContext context = new CustomTopologyContext(); bolt = new FilterBolt(TopologyConstants.RECORD_COMPONENT, config); ComponentUtils.prepare(new HashMap<>(), bolt, context, collector); Tuple query = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42", makeFieldFilterQuery("bar"), METADATA); bolt.execute(query); BulletRecord record = RecordBox.get().add("field", "foo").getRecord(); long start = System.currentTimeMillis(); IntStream.range(0, 10).mapToObj(i -> makeRecordTuple(record, System.currentTimeMillis())) .forEach(bolt::execute); long end = System.currentTimeMillis(); double actualLatecy = context.getDoubleMetric(TopologyConstants.LATENCY_METRIC); Assert.assertTrue(actualLatecy <= end - start); }
// Builds a fresh config, collector, and prepared bolt before every test.
@BeforeMethod
public void setup() {
    config = oneRecordConfig();
    collector = new CustomCollector();
    bolt = ComponentUtils.prepare(new FilterBolt(TopologyConstants.RECORD_COMPONENT, config), collector);
}
// Re-submitting a query with an ID the manager already tracks must not create
// a second entry for it.
@Test
public void testDuplicateQueryIds() {
    Tuple firstQuery = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42", makeFieldFilterQuery("b235gf23b"), METADATA);
    Tuple secondQuery = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "43", makeFilterQuery("timestamp", asList("1", "2", "3", "45"), NOT_EQUALS), METADATA);
    Assert.assertEquals(bolt.getManager().size(), 0);
    bolt.execute(firstQuery);
    bolt.execute(secondQuery);
    Assert.assertEquals(bolt.getManager().size(), 2);
    // Duplicate submissions: the manager size must stay at 2.
    bolt.execute(firstQuery);
    bolt.execute(secondQuery);
    Assert.assertEquals(bolt.getManager().size(), 2);
}
// Record handler: extracts the BulletRecord payload from the tuple and runs it
// through the queries the manager categorizes as relevant for it.
private void onRecord(Tuple tuple) {
    BulletRecord record = (BulletRecord) tuple.getValue(TopologyConstants.RECORD_POSITION);
    handleCategorizedQueries(manager.categorize(record));
}
// The bolt must declare an (id, data) schema on the data stream.
@Test
public void testOutputFields() {
    CustomOutputFieldsDeclarer declarer = new CustomOutputFieldsDeclarer();
    bolt.declareOutputFields(declarer);
    Fields dataFields = new Fields(TopologyConstants.ID_FIELD, TopologyConstants.DATA_FIELD);
    Assert.assertTrue(declarer.areFieldsPresent(TopologyConstants.DATA_STREAM, false, dataFields));
}
@Override public void execute(Tuple tuple) { // Check if the tuple is any known type, otherwise make it unknown TupleClassifier.Type type = classifier.classify(tuple).orElse(TupleClassifier.Type.UNKNOWN_TUPLE); switch (type) { case TICK_TUPLE: onTick(); break; case METADATA_TUPLE: onMeta(tuple); break; case QUERY_TUPLE: onQuery(tuple); break; case RECORD_TUPLE: onRecord(tuple); updateLatency(tuple); break; default: // May want to throw an error here instead of not acking log.error("Unknown tuple encountered: {}", type); return; } collector.ack(tuple); }
// A query with a malformed aggregation should be dropped without the bolt
// emitting anything for subsequent records.
@Test
public void testQueryErrorsAreSilentlyIgnored() {
    Tuple badQuery = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42", "{'aggregation': { 'type': null }}");
    bolt.execute(badQuery);
    BulletRecord record = RecordBox.get().add("field", "b235gf23b").getRecord();
    Tuple recordTuple = makeRecordTuple(record);
    bolt.execute(recordTuple);
    bolt.execute(recordTuple);
    // The broken query never became active, so nothing was emitted.
    Assert.assertEquals(collector.getEmittedCount(), 0);
}
// Verifies that a bolt wired to a custom record source component still filters
// correctly: matching records are emitted on the data stream, non-matching
// records are not.
@Test public void testTuplesCustomSource() {
    bolt = ComponentUtils.prepare(new FilterBolt("CustomSource", oneRecordConfig()), collector);
    Tuple query = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42", makeFieldFilterQuery("b235gf23b"), METADATA);
    bolt.execute(query);
    // A record that satisfies the filter, arriving from the custom source.
    BulletRecord record = RecordBox.get().add("field", "b235gf23b").getRecord();
    Tuple matching = TupleUtils.makeRawTuple("CustomSource", TopologyConstants.RECORD_STREAM, record);
    bolt.execute(matching);
    // Tick to flush the window (oneRecordConfig emits after one record/tick).
    Tuple tick = TupleUtils.makeTuple(TupleClassifier.Type.TICK_TUPLE);
    bolt.execute(tick);
    // A record that does NOT satisfy the filter.
    BulletRecord anotherRecord = RecordBox.get().add("field", "wontmatch").getRecord();
    Tuple nonMatching = TupleUtils.makeRawTuple("CustomSource", TopologyConstants.RECORD_STREAM, anotherRecord);
    bolt.execute(nonMatching);
    Tuple expected = makeDataTuple(TupleClassifier.Type.DATA_TUPLE, "42", record);
    Assert.assertTrue(wasRawRecordEmittedTo(TopologyConstants.DATA_STREAM, 1, expected));
    Tuple notExpected = makeDataTuple(TupleClassifier.Type.DATA_TUPLE, "42", anotherRecord);
    Assert.assertFalse(wasRawRecordEmitted(notExpected));
}
builder.setBolt(FILTER_COMPONENT, new FilterBolt(recordComponent, config), filterBoltParallelism) .shuffleGrouping(recordComponent) .allGrouping(QUERY_COMPONENT, QUERY_STREAM)
private void onTick() { // Categorize queries in partition mode. handleCategorizedQueries(manager.categorize()); handleStats(); }
// Handles a record tuple: pulls the record payload out of the tuple and feeds
// it to every query the manager categorizes for it.
private void onRecord(Tuple tuple) {
    Object payload = tuple.getValue(TopologyConstants.RECORD_POSITION);
    handleCategorizedQueries(manager.categorize((BulletRecord) payload));
}
// With a RECORD-based sliding window of size 1, every matching record should
// be emitted immediately; four matches yield four emissions on the data stream.
@Test public void testFilteringSlidingWindow() {
    Tuple query = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42",
                              makeSimpleAggregationFilterQuery("field", singletonList("b235gf23b"), EQUALS, RAW, 5,
                                                               Window.Unit.RECORD, 1, Window.Unit.RECORD, 1),
                              METADATA);
    bolt.execute(query);
    BulletRecord record = RecordBox.get().add("field", "b235gf23b").getRecord();
    Tuple matching = makeRecordTuple(record);
    bolt.execute(matching);
    bolt.execute(matching);
    bolt.execute(matching);
    bolt.execute(matching);
    // Each of the 4 matches should have produced a sliding-window emission.
    Tuple expected = makeSlidingTuple(TupleClassifier.Type.DATA_TUPLE, "42", record);
    Assert.assertTrue(wasRawRecordEmittedTo(TopologyConstants.DATA_STREAM, 4, expected));
}
/**
 * Creates a spied Querier whose {@code isExceedingRateLimit()} answers false
 * for the first {@code limitedAfter} invocations and true afterwards, with a
 * canned rate limit error to return.
 */
@Override
protected Querier createQuerier(Querier.Mode mode, String id, String query, BulletConfig config) {
    Querier spied = spy(super.createQuerier(mode, id, query, config));
    // limitedAfter falses, then true; Mockito repeats the final answer.
    List<Boolean> answers = new ArrayList<>();
    for (int i = 0; i < limitedAfter; ++i) {
        answers.add(false);
    }
    answers.add(true);
    doAnswer(returnsElementsOf(answers)).when(spied).isExceedingRateLimit();
    doReturn(error).when(spied).getRateLimitError();
    return spied;
}
}
builder.setBolt(FILTER_COMPONENT, new FilterBolt(recordComponent, config), filterBoltParallelism) .shuffleGrouping(recordComponent) .allGrouping(QUERY_COMPONENT, QUERY_STREAM)