/**
 * SpoutSpec may be changed; this method refreshes the per-topic state derived from
 * the spec's tuple2StreamMetadataMap and streamRepartitionMetadataMap.
 *
 * @param spoutSpec updated spout specification; only the entries keyed by {@code topic} are read
 * @param sds latest stream-id to StreamDefinition mapping, stored as-is
 */
@Override
public void update(SpoutSpec spoutSpec, Map<String, StreamDefinition> sds) {
    // Re-derive the repartition metadata and the tuple->stream converter for this topic only.
    this.streamRepartitionMetadataList = spoutSpec.getStreamRepartitionMetadataMap().get(topic);
    this.converter = new Tuple2StreamConverter(spoutSpec.getTuple2StreamMetadataMap().get(topic));
    this.sds = sds;
}
/**
 * Builds the Kafka datasource descriptor for the no-data alert aggregation input topic,
 * including the codec that maps raw tuples onto the aggregation stream.
 */
private Kafka2TupleMetadata buildAggregationDatasource() {
    // Properties consumed by the stream-name selector: fixed stream name, identity format.
    Properties selectorProps = new Properties();
    selectorProps.put("userProvidedStreamName", NODATA_ALERT_AGGR_STREAM);
    selectorProps.put("streamNameFormat", "%s");

    Tuple2StreamMetadata streamCodec = new Tuple2StreamMetadata();
    streamCodec.setStreamNameSelectorCls(JSON_STRING_STREAM_NAME_SELECTOR_CLS);
    streamCodec.setStreamNameSelectorProp(selectorProps);
    streamCodec.setTimestampColumn(STREAM_TIMESTAMP_COLUMN_NAME);
    streamCodec.setTimestampFormat(STREAM_TIMESTAMP_FORMAT);

    Kafka2TupleMetadata aggrDatasource = new Kafka2TupleMetadata();
    aggrDatasource.setName(NODATA_ALERT_AGGR_DATASOURCE_NAME);
    aggrDatasource.setType(DATASOURCE_TYPE);
    aggrDatasource.setSchemeCls(DATASOURCE_SCHEME_CLS);
    aggrDatasource.setTopic(NODATA_ALERT_AGGR_TOPIC_NAME);
    aggrDatasource.setCodec(streamCodec);
    return aggrDatasource;
}
/** Human-readable summary listing the stream id, partition and target queue of this router spec. */
@Override
public String toString() {
    Object[] formatArgs = {this.getStreamId(), this.getPartition(), this.getTargetQueue()};
    return String.format("StreamRouterSpec[streamId=%s,partition=%s, queue=[%s]]", formatArgs);
}
/**
 * Creates a single-entry datasource map, keyed by datasource name, describing a Kafka
 * topic read with the plain-string scheme.
 *
 * @param topicName Kafka topic the datasource reads from
 * @param dataSourceName logical datasource name, also used as the map key
 */
private Map<String, Kafka2TupleMetadata> createDatasource(final String topicName, final String dataSourceName) {
    Kafka2TupleMetadata metadata = new Kafka2TupleMetadata();
    metadata.setName(dataSourceName);
    metadata.setTopic(topicName);
    metadata.setType("KAFKA");
    metadata.setSchemeCls("PlainStringScheme");
    metadata.setCodec(new Tuple2StreamMetadata());
    metadata.setProperties(new HashMap<String, String>());

    Map<String, Kafka2TupleMetadata> byName = new HashMap<>();
    byName.put(metadata.getName(), metadata);
    return byName;
}
/** Builds a fixture Kafka2TupleMetadata for TEST_DATASOURCE_1 backed by topic "tupleTopic". */
private static Kafka2TupleMetadata createKafka2TupleMetadata() {
    Kafka2TupleMetadata fixture = new Kafka2TupleMetadata();
    fixture.setName(TEST_DATASOURCE_1);
    fixture.setType("KAFKA");
    fixture.setTopic("tupleTopic");
    fixture.setSchemeCls("SchemeClass");
    fixture.setCodec(new Tuple2StreamMetadata());
    return fixture;
}
/**
 * Verifies the equals/hashCode contract of Kafka2TupleMetadata: two independently built,
 * identically populated instances are equal with equal hash codes, and diverge once a
 * field differs.
 */
@Test
public void testKafka2TupleMetadata() {
    Kafka2TupleMetadata first = buildSampleKafka2TupleMetadata();
    Kafka2TupleMetadata second = buildSampleKafka2TupleMetadata();

    // Distinct instances, but equal by value — use the dedicated assertions for clear failure messages.
    Assert.assertNotSame(first, second);
    Assert.assertEquals(first, second);
    Assert.assertEquals(first.hashCode(), second.hashCode());

    // Changing one field must break both equality and hash equality.
    second.setType("setType1");
    Assert.assertFalse(second.equals(first));
    Assert.assertFalse(second.hashCode() == first.hashCode());
}

/** Builds one fully populated Kafka2TupleMetadata instance used by the equality test. */
private static Kafka2TupleMetadata buildSampleKafka2TupleMetadata() {
    Kafka2TupleMetadata metadata = new Kafka2TupleMetadata();
    metadata.setName("setName");
    metadata.setCodec(new Tuple2StreamMetadata());
    metadata.setType("setType");
    metadata.setTopic("setTopic");
    metadata.setSchemeCls("org.apache.eagle.alert.engine.scheme.PlainStringScheme");
    return metadata;
}
}
/**
 * Builds a sample StreamRouterSpec that routes {@code streamId}, grouped by
 * {@code groupByField}, to one work slot per target evaluator id on "sampleTopology".
 */
public static StreamRouterSpec createSampleStreamRouteSpec(String streamId, String groupByField, List<String> targetEvaluatorIds) {
    WorkSlot[] workSlots = targetEvaluatorIds.stream()
        .map(evaluatorId -> new WorkSlot("sampleTopology", evaluatorId))
        .toArray(WorkSlot[]::new);

    StreamRouterSpec spec = new StreamRouterSpec();
    spec.setStreamId(streamId);
    spec.setPartition(createSampleStreamGroupbyPartition(streamId, Arrays.asList(groupByField)));
    spec.setTargetQueue(Arrays.asList(new PolicyWorkerQueue(Arrays.asList(workSlots))));
    return spec;
}
/**
 * Groups this spec's router specs by their stream partition.
 *
 * @return map from partition to every router spec sharing that partition
 */
public Map<StreamPartition, List<StreamRouterSpec>> makeSRS() {
    Map<StreamPartition, List<StreamRouterSpec>> newSRS = new HashMap<>();
    // computeIfAbsent replaces the containsKey/put/get triple lookup with a single lookup.
    this.getRouterSpecs().forEach(spec ->
        newSRS.computeIfAbsent(spec.getPartition(), partition -> new ArrayList<>()).add(spec));
    return newSRS;
}
/** Builds a fixture ScheduleState at version V1 with one monitored stream and one assignment. */
private static ScheduleState createScheduleState() {
    ScheduleState state = new ScheduleState();
    state.setVersion(V1);
    state.getMonitoredStreams().add(createMonitoredStream());
    state.getAssignments().add(createAssignment());
    return state;
}
/** Derives the bolt id for a work slot from its topology name and bolt id via UNIQUE_BOLT_ID. */
private String getUniqueBoltId(WorkSlot slot) {
    String topologyName = slot.getTopologyName();
    String boltId = slot.getBoltId();
    return String.format(UNIQUE_BOLT_ID, topologyName, boltId);
}
@Test
public void testStreamRepartitionStrategy1() {
    // A default-constructed strategy is expected to NPE when equals() touches its unset fields.
    thrown.expect(NullPointerException.class);
    StreamRepartitionStrategy strategy = new StreamRepartitionStrategy();
    strategy.equals(strategy);
}
/** Returns a fresh list of all spout specs held by the current versioned schedule state. */
@Override
public List<SpoutSpec> listSpoutMetadata() {
    ScheduleState latest = getVersionedSpec();
    List<SpoutSpec> result = new ArrayList<>();
    result.addAll(latest.getSpoutSpecs().values());
    return result;
}
@Test
public void testStreamRepartitionStrategy() {
    // A default-constructed strategy is expected to NPE when hashCode() touches its unset fields.
    thrown.expect(NullPointerException.class);
    StreamRepartitionStrategy strategy = new StreamRepartitionStrategy();
    strategy.hashCode();
}
/** Drops the removed spec's partition entry from both the spec map and the partitioner map. */
private void inplaceRemove(Map<StreamPartition, List<StreamRouterSpec>> routeSpecMap, Map<StreamPartition, List<StreamRoutePartitioner>> routePartitionerMap, StreamRouterSpec toBeRemoved) {
    StreamPartition partition = toBeRemoved.getPartition();
    routeSpecMap.remove(partition);
    routePartitionerMap.remove(partition);
}
/** Collects the Kafka topic name of every datasource configured on the spout spec. */
private List<String> getTopics(SpoutSpec spoutSpec) {
    List<String> topics = new ArrayList<>();
    for (Kafka2TupleMetadata datasource : spoutSpec.getKafka2TupleMetadataMap().values()) {
        topics.add(datasource.getTopic());
    }
    return topics;
}
/** Registers (or replaces) a datasource under its own name. */
public void addDataSource(Kafka2TupleMetadata dataSource) {
    String key = dataSource.getName();
    this.datasources.put(key, dataSource);
}
/**
 * Stores a schedule state keyed by its version, replacing any prior state for that version.
 *
 * @param state schedule state to record; its version is used as the map key
 */
@Override
public void addScheduleState(ScheduleState state) {
    scheduleStates.put(state.getVersion(), state);
}
/**
 * Builds the Kafka datasource descriptor for the no-data alert aggregation output topic,
 * including the codec that maps raw tuples onto the aggregation output stream.
 */
private Kafka2TupleMetadata buildAggregationOutputDatasource() {
    // Properties consumed by the stream-name selector: fixed stream name, identity format.
    Properties selectorProps = new Properties();
    selectorProps.put("userProvidedStreamName", NODATA_ALERT_AGGR_OUTPUT_STREAM);
    selectorProps.put("streamNameFormat", "%s");

    Tuple2StreamMetadata streamCodec = new Tuple2StreamMetadata();
    streamCodec.setStreamNameSelectorCls(JSON_STRING_STREAM_NAME_SELECTOR_CLS);
    streamCodec.setStreamNameSelectorProp(selectorProps);
    streamCodec.setTimestampColumn(STREAM_TIMESTAMP_COLUMN_NAME);
    streamCodec.setTimestampFormat(STREAM_TIMESTAMP_FORMAT);

    Kafka2TupleMetadata outputDatasource = new Kafka2TupleMetadata();
    outputDatasource.setName(NODATA_ALERT_AGGR_OUTPUT_DATASOURCE_NAME);
    outputDatasource.setType(DATASOURCE_TYPE);
    outputDatasource.setSchemeCls(DATASOURCE_SCHEME_CLS);
    outputDatasource.setTopic(NODATA_ALERT_AGGR_OUTPUT_TOPIC_NAME);
    outputDatasource.setCodec(streamCodec);
    return outputDatasource;
}
/**
 * Collects the sort spec of each router spec's partition, skipping partitions
 * that carry no sort spec.
 *
 * @return map from partition to its non-null sort spec
 */
public Map<StreamPartition, StreamSortSpec> makeSSS() {
    Map<StreamPartition, StreamSortSpec> newSSS = new HashMap<>();
    for (StreamRouterSpec routerSpec : this.getRouterSpecs()) {
        StreamPartition partition = routerSpec.getPartition();
        StreamSortSpec sortSpec = partition.getSortSpec();
        if (sortSpec != null) {
            newSSS.put(partition, sortSpec);
        }
    }
    return newSSS;
}
/**
 * Wraps the delegate collector with the per-topic conversion and repartition state this
 * spout needs when emitting tuples from {@code topic}.
 *
 * @param spout owning CorrelationSpout — NOTE(review): stored as-is; usage not visible here
 * @param delegate actual SpoutOutputCollector to send data to following bolts
 * @param topic topic for this KafkaSpout to handle
 * @param spoutSpec spec whose per-topic repartition metadata and tuple-to-stream metadata are read
 * @param numGroupbyBolts bolts following this spout.
 * @param sds stream-id to StreamDefinition mapping, stored as-is
 * @param serializer serializer for partitioned events, stored as-is
 * @param logEventEnabled whether event logging is enabled for this collector
 */
public SpoutOutputCollectorWrapper(CorrelationSpout spout, ISpoutOutputCollector delegate, String topic, SpoutSpec spoutSpec, int numGroupbyBolts, Map<String, StreamDefinition> sds, PartitionedEventSerializer serializer, boolean logEventEnabled) {
    super(delegate);
    this.spout = spout;
    this.delegate = delegate;
    this.topic = topic;
    // Only the entries for this topic are extracted from the spec.
    this.streamRepartitionMetadataList = spoutSpec.getStreamRepartitionMetadataMap().get(topic);
    this.converter = new Tuple2StreamConverter(spoutSpec.getTuple2StreamMetadataMap().get(topic));
    this.numOfRouterBolts = numGroupbyBolts;
    this.sds = sds;
    this.serializer = serializer;
    this.logEventEnabled = logEventEnabled;
}