@Override
public KeyValue<K, V> transform(K key, V value) {
    // Identity transform: pass each record through unchanged.
    return KeyValue.pair(key, value);
}
});
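For context, a minimal self-contained sketch of the identity Transformer this fragment appears to come from. The supplier class name and wiring are assumptions, as is a Kafka Streams version (2.0+) in which Transformer no longer declares punctuate.

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.kstream.TransformerSupplier;
import org.apache.kafka.streams.processor.ProcessorContext;

// Hypothetical reconstruction, not the original class.
public class IdentityTransformerSupplier<K, V>
        implements TransformerSupplier<K, V, KeyValue<K, V>> {

    @Override
    public Transformer<K, V, KeyValue<K, V>> get() {
        return new Transformer<K, V, KeyValue<K, V>>() {
            @Override
            public void init(ProcessorContext context) {
                // No state or punctuation needed for a pass-through transform.
            }

            @Override
            public KeyValue<K, V> transform(K key, V value) {
                return KeyValue.pair(key, value);
            }

            @Override
            public void close() {
                // Nothing to release.
            }
        };
    }
}

A stream would use it as stream.transform(new IdentityTransformerSupplier<>()).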
@Override
public KeyValue<String, Prediction> apply(String key, JsonNode value) {
    // Nasty hack until we have a correct way to reduce and send non-final
    // per-second aggregations to InfluxDB: the random suffix keeps every key
    // unique so records are not collapsed downstream.
    Random r = new Random();
    Prediction pred = new Prediction();
    pred.parse(value);
    String ikey = pred.consumer + "_" + pred.variation + "_" + pred.model + "_"
            + pred.predictedClass + "_" + pred.time + "_" + r.nextInt();
    return new KeyValue<String, Prediction>(ikey, pred);
}
KStreamBuilder builder = new KStreamBuilder();
System.out.println("topic:" + topic);
final String parseDateMethod = ns.getString("parse_date_method");
KStream<byte[], JsonNode> source = builder.stream(Serdes.ByteArray(), jsonSerde, topic);
// The predicate and foreach bodies were lost when this snippet was flattened;
// labeled stubs stand in for them below.
source.filter(new Predicate<byte[], JsonNode>() {
    @Override
    public boolean test(byte[] key, JsonNode value) {
        return true; // filter condition elided in the original snippet
    }
}).foreach(new ForeachAction<byte[], JsonNode>() {
    @Override
    public void apply(byte[] key, JsonNode value) {
        // per-record action elided in the original snippet
    }
});
KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();
influxDB.enableBatch(50, 5, TimeUnit.SECONDS);
KStreamBuilder builder = new KStreamBuilder();
KStream<String, JsonNode> source = builder.stream(stringSerde, jsonSerde, ns.getString("topic"));
// Split the source into impression and request streams. The predicate, mapper,
// and foreach bodies were lost when this snippet was flattened; labeled stubs
// stand in for them below.
KStream<String, JsonNode>[] branches = source.branch(
    new Predicate<String, JsonNode>() {
        @Override public boolean test(String key, JsonNode value) { return true; /* impression test elided */ }
    },
    new Predicate<String, JsonNode>() {
        @Override public boolean test(String key, JsonNode value) { return true; /* request test elided */ }
    });
KStream<String, JsonNode> impressionsStream = branches[0];
KStream<String, JsonNode> requestsStream = branches[1];
impressionsStream.map(new KeyValueMapper<String, JsonNode, KeyValue<String, Impression>>() {
    @Override public KeyValue<String, Impression> apply(String key, JsonNode value) {
        Impression imp = new Impression(); // conversion from JsonNode elided in the original
        return new KeyValue<>(key, imp);
    }
}).foreach(new ForeachAction<String, Impression>() {
    @Override public void apply(String key, Impression value) { /* InfluxDB write elided */ }
});
requestsStream.map(new KeyValueMapper<String, JsonNode, KeyValue<String, Request>>() {
    @Override public KeyValue<String, Request> apply(String key, JsonNode value) {
        Request req = new Request(); // conversion from JsonNode elided in the original
        return new KeyValue<>(key, req);
    }
}).foreach(new ForeachAction<String, Request>() {
    @Override public void apply(String key, Request value) { /* InfluxDB write elided */ }
});
KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();
writtenRows.add(value);
rowQueue.put(new KeyValue<>(key, value));

// Record the expected lifecycle calls on the mocked KafkaStreams instance.
mockKafkaStreams.start();
expectLastCall();
mockKafkaStreams.setUncaughtExceptionHandler(anyObject(Thread.UncaughtExceptionHandler.class));
expectLastCall();
mockKafkaStreams.cleanUp();
expectLastCall();
mockKafkaStreams.close();
expectLastCall();
@Override
public void run() {
    streams.close();
}
}));
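The trailing `}));` suggests this run() is registered as a JVM shutdown hook. A minimal sketch of the full pattern; the thread name is illustrative, not from the original.

// Common pattern: close the Streams app cleanly on JVM shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
    @Override
    public void run() {
        streams.close();
    }
}, "streams-shutdown-hook"));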
/**
 * Creates a {@link KafkaStreams} instance with a tracing-enabled {@link KafkaClientSupplier}.
 * All Topology Sources and Sinks (including internal Topics) will create Spans on records
 * processed (i.e. sent or consumed).
 *
 * Use this instead of the {@link KafkaStreams} constructor.
 *
 * <p>Simple example:
 * <pre>{@code
 * // KafkaStreams with tracing-enabled KafkaClientSupplier
 * KafkaStreams kafkaStreams = kafkaStreamsTracing.kafkaStreams(topology, streamsConfig);
 * }</pre>
 *
 * @see TracingKafkaClientSupplier
 */
public KafkaStreams kafkaStreams(Topology topology, Properties streamsConfig) {
  final KafkaTracing kafkaTracing = KafkaTracing.create(tracing);
  final KafkaClientSupplier kafkaClientSupplier = new TracingKafkaClientSupplier(kafkaTracing);
  return new KafkaStreams(topology, streamsConfig, kafkaClientSupplier);
}
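Assuming this method lives on Brave's KafkaStreamsTracing (the names match that instrumentation), here is a slightly fuller, hedged usage sketch. The Tracing setup, application id, bootstrap servers, and the "words" topic are illustrative assumptions.

import brave.Tracing;
import brave.kafka.streams.KafkaStreamsTracing;
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

// Hedged sketch: wiring a traced KafkaStreams app end to end.
Tracing tracing = Tracing.newBuilder().localServiceName("streams-app").build();
KafkaStreamsTracing kafkaStreamsTracing = KafkaStreamsTracing.create(tracing);

StreamsBuilder builder = new StreamsBuilder();
builder.stream("words"); // sources and sinks of this topology will be traced

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "traced-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

KafkaStreams streams = kafkaStreamsTracing.kafkaStreams(builder.build(), props);
streams.start();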
final Topology topology = mock(Topology.class);
final TopologyDescription topologyDescription = mock(TopologyDescription.class);
expect(topology.describe()).andReturn(topologyDescription);
replay(queryStreams, topology, topologyDescription);
final Map<String, Object> streamsProperties = Collections.singletonMap("k", "v");
@Before
public void setUp() {
    objectMapper = JsonMapper.INSTANCE.mapper;
    ehCapture = newCapture();
    drainCapture = newCapture();
    limitHandlerCapture = newCapture();

    final Schema schema = SchemaBuilder.struct()
        .field("col1", Schema.OPTIONAL_STRING_SCHEMA)
        .build();

    final KafkaStreams kStreams = niceMock(KafkaStreams.class);
    kStreams.setStateListener(anyObject());
    expectLastCall();
    expect(kStreams.state()).andReturn(State.RUNNING);

    expect(queryMetadata.getRowQueue()).andReturn(rowQueue).anyTimes();
    expect(queryMetadata.getResultSchema()).andReturn(schema).anyTimes();
    queryMetadata.setLimitHandler(capture(limitHandlerCapture));
    expectLastCall().once();
    queryMetadata.setUncaughtExceptionHandler(capture(ehCapture));
    expectLastCall();

    replay(kStreams);
}
CompatibilityBreakingStreamsConfig(
    final String name,
    final Object defaultValueLegacy,
    final Object defaultValueCurrent) {
  this.name = Objects.requireNonNull(name);
  if (!StreamsConfig.configDef().names().contains(name)) {
    throw new IllegalArgumentException(
        String.format("%s is not a valid streams config", name));
  }
  this.defaultValueLegacy = defaultValueLegacy;
  this.defaultValueCurrent = defaultValueCurrent;
}
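To illustrate the guard clause: StreamsConfig.configDef().names() is the set of known Kafka Streams config keys, so an unrecognized name fails fast at construction. A hedged sketch; the calling context is assumed, not from the original.

// Valid: "processing.guarantee" is a known StreamsConfig key.
new CompatibilityBreakingStreamsConfig(
    StreamsConfig.PROCESSING_GUARANTEE_CONFIG, "at_least_once", "exactly_once");

// Invalid: throws IllegalArgumentException("no.such.config is not a valid streams config").
new CompatibilityBreakingStreamsConfig("no.such.config", "a", "b");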
influxDB.enableBatch(50, 5, TimeUnit.SECONDS);
KStreamBuilder builder = new KStreamBuilder();
KStream<String, JsonNode> source = builder.stream(stringSerde, jsonSerde, ns.getString("topic"));
// The filter and foreach bodies were lost when this snippet was flattened;
// labeled stubs stand in. The mapper is the Prediction mapper shown earlier
// in this section.
source.filter(new Predicate<String, JsonNode>() {
    @Override public boolean test(String key, JsonNode value) { return true; /* condition elided */ }
}).map(new KeyValueMapper<String, JsonNode, KeyValue<String, Prediction>>() {
    @Override public KeyValue<String, Prediction> apply(String key, JsonNode value) {
        // See the Prediction mapper earlier in this section for the full body.
        Prediction pred = new Prediction();
        pred.parse(value);
        return new KeyValue<>(key, pred);
    }
}).foreach(new ForeachAction<String, Prediction>() {
    @Override public void apply(String key, Prediction value) { /* InfluxDB write elided */ }
});
KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();
@Override
public void stop() {
    if (streams != null) {
        streams.close();
    }
}
private IAnswer<Integer> rows(final Object... rows) {
    return () -> {
        final Collection<KeyValue<String, GenericRow>> output = drainCapture.getValue();
        Arrays.stream(rows)
            .map(ImmutableList::of)
            .map(GenericRow::new)
            .forEach(row -> output.add(new KeyValue<>("not used", row))); // key is unused by the writer
        return rows.length;
    };
}
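For context, this is how such an IAnswer is typically wired into an EasyMock expectation: the expectation captures the sink collection, and the answer fills it when the mocked drain method is invoked. The method name drainTo below is hypothetical, invented for illustration to match the capture's element type.

// Hedged sketch: wiring the rows(...) answer into an EasyMock expectation.
// `drainTo` is a hypothetical mocked method, not taken from the original code.
expect(queryMetadata.drainTo(capture(drainCapture)))
    .andAnswer(rows("hello", "world")) // fills the captured collection, returns 2
    .andReturn(0);                     // subsequent drains yield nothing
replay(queryMetadata);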
final Topology topology = mock(Topology.class);
final TopologyDescription topologyDescription = mock(TopologyDescription.class);
expect(topology.describe()).andReturn(topologyDescription);
replay(topology, topologyDescription);
final KsqlTopic sinkTopic = new KsqlTopic("fake_sink", "fake_sink", new KsqlJsonTopicSerDe(), true);