/**
 * Storm lifecycle callback: captures the runtime handles Storm supplies,
 * seeds an empty {@code SpoutSpec} cache, subscribes this spout to metadata
 * change notifications, and registers the KafkaSpout metric.
 *
 * <p>NOTE(review): statement order matters — the listener is registered
 * before {@code changeNotifyService.init(...)} so no change event is missed.
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("open method invoked");
    }
    this.conf = conf;
    this.context = context;
    this.collector = collector;
    this.taskIndex = context.getThisTaskIndex();
    // initialize an empty SpoutSpec; a real spec arrives later via the notify service
    cachedSpoutSpec = new SpoutSpec(topologyId, new HashMap<>(), new HashMap<>(), new HashMap<>());
    changeNotifyService.registerListener(this);
    changeNotifyService.init(config, MetadataType.SPOUT);
    // register KafkaSpout metric with Storm's metrics system (60-second interval)
    kafkaSpoutMetric = new KafkaSpoutMetric();
    context.registerMetric("kafkaSpout", kafkaSpoutMetric, 60);
    this.serializer = Serializers.newPartitionedEventSerializer(this);
}
/**
 * Reacts to a changed {@code SpoutSpec}: refreshes the stream definitions,
 * the tuple-to-stream converter, and the repartition metadata that apply to
 * this instance's topic.
 *
 * @param spoutSpec the updated spout specification
 * @param sds       the updated stream definitions, keyed by stream id
 */
@Override
public void update(SpoutSpec spoutSpec, Map<String, StreamDefinition> sds) {
    this.sds = sds;
    this.converter = new Tuple2StreamConverter(spoutSpec.getTuple2StreamMetadataMap().get(topic));
    this.streamRepartitionMetadataList = spoutSpec.getStreamRepartitionMetadataMap().get(topic);
}
/**
 * Collects the Kafka topic name of every data source declared in the spec.
 *
 * @param spoutSpec spec whose Kafka2Tuple metadata map is inspected
 * @return topic names in the map's value-iteration order
 */
private List<String> getTopics(SpoutSpec spoutSpec) {
    List<String> topics = new ArrayList<>(spoutSpec.getKafka2TupleMetadataMap().size());
    for (Kafka2TupleMetadata metadata : spoutSpec.getKafka2TupleMetadataMap().values()) {
        topics.add(metadata.getTopic());
    }
    return topics;
}
// Verify the spec: exactly one data source keyed by TEST_TOPIC, and one
// repartition-metadata list containing a single entry.
Assert.assertEquals(1, ss.getKafka2TupleMetadataMap().size());
Assert.assertEquals(TEST_TOPIC, ss.getKafka2TupleMetadataMap().keySet().iterator().next());
Assert.assertEquals(1, ss.getStreamRepartitionMetadataMap().size());
List<StreamRepartitionMetadata> metas = ss.getStreamRepartitionMetadataMap().values().iterator().next();
Assert.assertEquals(1, metas.size());
// NOTE(review): the lines below repeat the assertions above verbatim, including a
// second declaration of 'metas' which cannot compile in the same scope — these
// appear to be two separate test snippets; confirm against the original test file.
Assert.assertEquals(1, ss.getKafka2TupleMetadataMap().size());
Assert.assertEquals(TEST_TOPIC, ss.getKafka2TupleMetadataMap().keySet().iterator().next());
Assert.assertEquals(1, ss.getStreamRepartitionMetadataMap().size());
List<StreamRepartitionMetadata> metas = ss.getStreamRepartitionMetadataMap().values().iterator().next();
Assert.assertEquals(1, metas.size());
// Verify the versioned spec: expected version stamp, one data source keyed by
// TEST_TOPIC, and a single-entry repartition-metadata list.
Assert.assertEquals(version, ss.getVersion());
Assert.assertEquals(1, ss.getKafka2TupleMetadataMap().size());
Assert.assertEquals(TEST_TOPIC, ss.getKafka2TupleMetadataMap().keySet().iterator().next());
Assert.assertEquals(1, ss.getStreamRepartitionMetadataMap().size());
List<StreamRepartitionMetadata> metas = ss.getStreamRepartitionMetadataMap().values().iterator().next();
Assert.assertEquals(1, metas.size());
// NOTE(review): the next two assertions expect the SAME version but ZERO data
// sources, contradicting the size-1 check above — likely a second snippet from a
// different test (e.g. after clearing the spec); confirm against the original file.
Assert.assertEquals(version, ss.getVersion());
Assert.assertEquals(0, ss.getKafka2TupleMetadataMap().size());
/**
 * Test hook: overrides spout creation to assert on the reloaded metadata instead
 * of building a real KafkaSpoutWrapper, then flags success for the enclosing test.
 */
@Override
protected KafkaSpoutWrapper createKafkaSpout(Config config, Map conf, TopologyContext context,
        SpoutOutputCollector collector, String topic, String topic2SchemeClsName,
        SpoutSpec streamMetadatas, Map<String, StreamDefinition> sds) {
    // expect exactly one repartition entry, with both streams s1 and s2
    // present and bound to the expected topic
    Assert.assertEquals(1, streamMetadatas.getStreamRepartitionMetadataMap().size());
    Assert.assertTrue(streamMetadatas.getStream("s1") != null);
    Assert.assertTrue(streamMetadatas.getStream("s2") != null);
    Assert.assertEquals(topicName, streamMetadatas.getStream("s1").getTopicName());
    Assert.assertEquals(topicName, streamMetadatas.getStream("s2").getTopicName());
    LOG.info("successfully verified new topic and streams");
    verified.set(true);
    return null; // the test caller tolerates a null wrapper
}
}; // closes an anonymous class whose declaration is outside this view
// Fetch the alert spec for the join topology, then assert the join spout spec
// holds one repartition entry with two metadata items under TEST_TOPIC.
AlertBoltSpec alertSpec = state.getAlertSpecs().get(joinTopo);
Assert.assertEquals(1, joinSpout.getStreamRepartitionMetadataMap().size());
Assert.assertEquals(2, joinSpout.getStreamRepartitionMetadataMap().get(TEST_TOPIC).size());
// Stamp the spec under test with the expected version value.
ss.setVersion(version);
// Verify the spec: one data source keyed by TEST_TOPIC and a single-entry
// repartition-metadata list.
Assert.assertEquals(1, ss.getKafka2TupleMetadataMap().size());
Assert.assertEquals(TEST_TOPIC, ss.getKafka2TupleMetadataMap().keySet().iterator().next());
Assert.assertEquals(1, ss.getStreamRepartitionMetadataMap().size());
List<StreamRepartitionMetadata> metas = ss.getStreamRepartitionMetadataMap().values().iterator().next();
Assert.assertEquals(1, metas.size());
// NOTE(review): the final assertion expects ZERO data sources, contradicting the
// size-1 check above — presumably from a different test stage; confirm in context.
Assert.assertEquals(0, ss.getKafka2TupleMetadataMap().size());
// Assemble the per-topology spout spec from the collected stream/tuple/data-source
// maps and index it by topology name.
SpoutSpec spoutSpec = new SpoutSpec(topo.getName(), streamsMap, tss, dss);
topoSpoutSpecsMap.put(topo.getName(), spoutSpec);
/**
 * Builds a collector wrapper bound to one Kafka topic, caching the conversion
 * and repartition metadata looked up from the supplied spec.
 *
 * @param spout           the owning correlation spout
 * @param delegate        actual SpoutOutputCollector to send data to following bolts
 * @param topic           topic for this KafkaSpout to handle
 * @param spoutSpec       spec providing tuple-to-stream and repartition metadata for {@code topic}
 * @param numGroupbyBolts bolts following this spout
 * @param sds             stream definitions keyed by stream id
 * @param serializer      serializer applied to partitioned events
 * @param logEventEnabled whether event logging is enabled for this wrapper
 */
public SpoutOutputCollectorWrapper(CorrelationSpout spout, ISpoutOutputCollector delegate, String topic,
                                   SpoutSpec spoutSpec, int numGroupbyBolts, Map<String, StreamDefinition> sds,
                                   PartitionedEventSerializer serializer, boolean logEventEnabled) {
    super(delegate);
    this.spout = spout;
    this.delegate = delegate;
    this.topic = topic;
    this.converter = new Tuple2StreamConverter(spoutSpec.getTuple2StreamMetadataMap().get(topic));
    this.streamRepartitionMetadataList = spoutSpec.getStreamRepartitionMetadataMap().get(topic);
    this.numOfRouterBolts = numGroupbyBolts;
    this.sds = sds;
    this.serializer = serializer;
    this.logEventEnabled = logEventEnabled;
}
// For each data source in the new metadata, record its scheme class and its
// properties, both keyed by topic.
// (Fragment: the loop's closing brace lies outside this view.)
for (Kafka2TupleMetadata ds : newMeta.getKafka2TupleMetadataMap().values()) {
    newSchemaName.put(ds.getTopic(), ds.getSchemeCls());
    dataSourceProperties.put(ds.getTopic(), ds.getProperties());
// Build a cached spec with one repartition-metadata entry, then reload the spout
// with an entirely empty spec — presumably to exercise topic-removal handling;
// confirm against the surrounding test method.
// (Fragment: enclosing try block starts outside this view.)
streamMetadatas.put(dataSourceName, Arrays.asList(m1));
SpoutSpec cachedMetadata = new SpoutSpec(topoId, streamMetadatas, null, dsMap);
spout.onReload(new SpoutSpec(topoId, new HashMap<String, List<StreamRepartitionMetadata>>(),
        new HashMap<>(), new HashMap<String, Kafka2TupleMetadata>()), null);
} catch (Exception ex) {
// Reload the spout with a spec carrying two repartition-metadata entries for the
// same data source, then assert the override hook observed and verified it.
dataSources.put(dataSourceName, Arrays.asList(m1, m2));
SpoutSpec newMetadata = new SpoutSpec(topoId, dataSources, null, dsMap);
spout.onReload(newMetadata, null);
Assert.assertTrue(verified.get());
// Build a spec with a single repartition-metadata entry keyed by the data
// source's name. (Fragment: 'ds', 'm1', 'dsMap' are declared outside this view.)
dataSources.put(ds.getName(), Arrays.asList(m1));
SpoutSpec newMetadata = new SpoutSpec(topoId, dataSources, null, dsMap);
// Serialize the assembled spec to JSON and print it for inspection.
SpoutSpec newSpec = new SpoutSpec(topologyName, streamRepartitionMetadataMap,
        tuple2StreamMetadataMap, kafka2TupleMetadataMap);
String json = MetadataSerDeser.serialize(newSpec);
System.out.println(json);