/**
 * Creates a {@link KafkaStreams} instance with a tracing-enabled {@link KafkaClientSupplier}. All
 * Topology Sources and Sinks (including internal Topics) will create Spans on records processed
 * (i.e., sent or consumed).
 *
 * <p>Use this instead of the {@link KafkaStreams} constructor.
 *
 * <p>Simple example:
 * <pre>{@code
 * // KafkaStreams with tracing-enabled KafkaClientSupplier
 * KafkaStreams kafkaStreams = kafkaStreamsTracing.kafkaStreams(topology, streamsConfig);
 * }</pre>
 *
 * @see TracingKafkaClientSupplier
 */
public KafkaStreams kafkaStreams(Topology topology, Properties streamsConfig) {
  final KafkaTracing kafkaTracing = KafkaTracing.create(tracing);
  final KafkaClientSupplier kafkaClientSupplier = new TracingKafkaClientSupplier(kafkaTracing);
  return new KafkaStreams(topology, streamsConfig, kafkaClientSupplier);
}
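// A hedged sketch of wiring the factory above into an application; building the
// brave Tracing instance and the service name are assumptions for illustration.
Tracing tracing = Tracing.newBuilder().localServiceName("my-streams-app").build();
KafkaStreamsTracing kafkaStreamsTracing = KafkaStreamsTracing.create(tracing);
KafkaStreams kafkaStreams = kafkaStreamsTracing.kafkaStreams(topology, streamsConfig);
kafkaStreams.start();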
KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();
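// For context, a minimal sketch of what `builder` and `props` typically carry
// (using the newer builder.build() API); the application id, broker address,
// and topic names are assumptions for illustration.
Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");        // assumed id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed broker
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic").to("output-topic"); // assumed pass-through topology

KafkaStreams streams = new KafkaStreams(builder.build(), props);
streams.start();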
final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
sumOfOddNumbers.toStream().to(SUM_OF_ODD_NUMBERS_TOPIC);
final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
return new KafkaStreams(builder.build(), streamsConfiguration);
private KafkaStreams startKStreams(String bootstrapServers) {
  KafkaStreams streams = new KafkaStreams(
      createOrdersMaterializedView().build(),
      config(bootstrapServers));
  metadataService = new MetadataService(streams);
  streams.cleanUp(); // don't do this in prod as it clears your state stores
  streams.start();
  return streams;
}
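// A minimal sketch of the shutdown counterpart to startKStreams(...); closing the
// returned instance on JVM exit is standard practice, though this hook itself is
// an assumption and not part of the snippet above.
Runtime.getRuntime().addShutdownHook(new Thread(streams::close));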
// KafkaStreams offers overloads taking either Properties or a StreamsConfig:
this.kafkaStreams = new KafkaStreams(topology, this.properties, this.clientSupplier);
this.kafkaStreams = new KafkaStreams(topology, this.streamsConfig, this.clientSupplier);
uppercased.to(outputTopic);
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
uppercasedAndAnonymized.to(outputTopic);
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
sumOfOddNumbers.toStream().to(outputTopic);
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
public static KafkaStreams run(final boolean doReset, final Properties streamsConfiguration) {
  // Define the processing topology
  final StreamsBuilder builder = new StreamsBuilder();
  final KStream<String, String> input = builder.stream("my-input-topic");
  input.selectKey((key, value) -> value.split(" ")[0])
      .groupByKey()
      .count()
      .toStream()
      .to("my-output-topic", Produced.with(Serdes.String(), Serdes.Long()));

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);

  // Delete the application's local state on reset
  if (doReset) {
    streams.cleanUp();
  }

  streams.start();
  return streams;
}
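// A sketch of how run(...) might be invoked; deriving the reset flag from the
// command line and the configuration values are assumptions for illustration.
public static void main(final String[] args) {
  final boolean doReset = args.length > 0 && "--reset".equals(args[0]);
  final Properties streamsConfiguration = new Properties();
  streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "reset-demo-app");
  streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  final KafkaStreams streams = run(doReset, streamsConfiguration);
  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}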
static KafkaStreams createStreams(final Properties streamsConfiguration) {
  final Serde<String> stringSerde = Serdes.String();
  StreamsBuilder builder = new StreamsBuilder();
  KStream<String, String> textLines = builder.stream(TEXT_LINES_TOPIC,
      Consumed.with(Serdes.String(), Serdes.String()));

  final KGroupedStream<String, String> groupedByWord = textLines
      .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
      .groupBy((key, word) -> word, Serialized.with(stringSerde, stringSerde));

  // Create a state store with the all-time word count
  groupedByWord.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("word-count")
      .withValueSerde(Serdes.Long()));

  // Create a windowed state store that contains the word count for every 1 minute
  groupedByWord.windowedBy(TimeWindows.of(60000))
      .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("windowed-word-count")
          .withValueSerde(Serdes.Long()));

  return new KafkaStreams(builder.build(), streamsConfiguration);
}
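// A hedged sketch of querying the "word-count" store created above via Interactive
// Queries; this assumes the streams instance has been started and is RUNNING, and
// "kafka" is a hypothetical word to look up.
ReadOnlyKeyValueStore<String, Long> wordCounts =
    streams.store("word-count", QueryableStoreTypes.keyValueStore());
Long count = wordCounts.get("kafka");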
return new KafkaStreams(builder.build(), new StreamsConfig(config));
Topics.ORDER_VALIDATIONS.valueSerde()));
return new KafkaStreams(builder.build(),
    MicroserviceUtils.baseStreamsConfig(bootstrapServers, stateDir, SERVICE_APP_ID));
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) {
  final StreamsBuilder builder = new StreamsBuilder();

  // Create the streams/tables for the join
  final KStream<String, Order> orders = builder.stream(ORDERS.name(),
      Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde()));
  final KStream<String, Payment> payments = builder.stream(PAYMENTS.name(),
      Consumed.with(PAYMENTS.keySerde(), PAYMENTS.valueSerde()))
      // Rekey payments to be by OrderId for the windowed join
      .selectKey((s, payment) -> payment.getOrderId());
  final GlobalKTable<Long, Customer> customers = builder.globalTable(CUSTOMERS.name(),
      Consumed.with(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde()));

  final Joined<String, Order, Payment> serdes = Joined
      .with(ORDERS.keySerde(), ORDERS.valueSerde(), PAYMENTS.valueSerde());

  // Join the two streams and the table, then send an email for each result
  orders.join(payments, EmailTuple::new, // join the Orders and Payments streams
          JoinWindows.of(MIN), serdes)
      // Next, join to the GlobalKTable of Customers
      .join(customers,
          (key1, tuple) -> tuple.order.getCustomerId(),
          // Note how, because we use a GlobalKTable, we can join on any attribute of the Customer
          EmailTuple::setCustomer)
      // Now, for each tuple, send an email
      .peek((key, emailTuple) -> emailer.sendEmail(emailTuple));

  return new KafkaStreams(builder.build(),
      baseStreamsConfig(bootstrapServers, stateDir, SERVICE_APP_ID));
}
props.setProperty(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");
return new KafkaStreams(builder.build(), props);
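// Setting the record cache to zero forwards every update downstream immediately.
// A sketch of the commit interval often lowered alongside it for low-latency
// output; the 100 ms value is an assumption for illustration.
props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "100");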