/**
 * Deletes the local state-store directory of the embedded streams instance.
 * No-op when no streams instance has been created yet.
 */
void cleanLocalState() {
    if (streams == null) {
        return;
    }
    streams.cleanUp();
}
// EasyMock recording phase: declare, in order, the calls the code under test is
// expected to make on the mocked KafkaStreams instance.
mockKafkaStreams.setUncaughtExceptionHandler(anyObject(Thread.UncaughtExceptionHandler.class)); expectLastCall();
// cleanUp() is expected before close(): the app wipes local state on shutdown/reset.
mockKafkaStreams.cleanUp(); expectLastCall();
mockKafkaStreams.close();
@Override public void start(final String bootstrapServers, final String stateDir) { streams = aggregateOrderValidations(bootstrapServers, stateDir); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + getClass().getSimpleName()); }
@Override public void start(final String bootstrapServers, final String stateDir) { streams = processStreams(bootstrapServers, stateDir); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + getClass().getSimpleName()); }
@Override public void start(final String bootstrapServers, final String stateDir) { streams = processStreams(bootstrapServers, stateDir); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + getClass().getSimpleName()); }
@Override public void start(final String bootstrapServers, final String stateDir) { streams = processStreams(bootstrapServers, stateDir); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + SERVICE_APP_ID); }
public static void main(final String[] args) throws Exception { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = buildWikipediaFeed( bootstrapServers, schemaRegistryUrl, "/tmp/kafka-streams"); // Always (and unconditionally) clean local state prior to starting the processing topology. // We opt for this unconditional call here because this will make it easier for you to play around with the example // when resetting the application for doing a re-run (via the Application Reset Tool, // http://docs.confluent.io/current/streams/developer-guide.html#application-reset-tool). // // The drawback of cleaning up local state prior is that your app must rebuilt its local state from scratch, which // will take time and will require reading all the state-relevant data from the Kafka cluster over the network. // Thus in a production scenario you typically do not want to clean up always as we do here but rather only when it // is truly needed, i.e., only under certain conditions (e.g., the presence of a command line flag for your app). // See `ApplicationResetExample.java` for a production-like example. streams.cleanUp(); streams.start(); // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); }
public static void main(final String[] args) throws Exception { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = buildTopArticlesStream( bootstrapServers, schemaRegistryUrl, "/tmp/kafka-streams"); // Always (and unconditionally) clean local state prior to starting the processing topology. // We opt for this unconditional call here because this will make it easier for you to play around with the example // when resetting the application for doing a re-run (via the Application Reset Tool, // http://docs.confluent.io/current/streams/developer-guide.html#application-reset-tool). // // The drawback of cleaning up local state prior is that your app must rebuilt its local state from scratch, which // will take time and will require reading all the state-relevant data from the Kafka cluster over the network. // Thus in a production scenario you typically do not want to clean up always as we do here but rather only when it // is truly needed, i.e., only under certain conditions (e.g., the presence of a command line flag for your app). // See `ApplicationResetExample.java` for a production-like example. streams.cleanUp(); streams.start(); // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); }
public static void main(String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = createStreams(bootstrapServers, schemaRegistryUrl, "/tmp/kafka-streams"); // Always (and unconditionally) clean local state prior to starting the processing topology. // We opt for this unconditional call here because this will make it easier for you to play around with the example // when resetting the application for doing a re-run (via the Application Reset Tool, // http://docs.confluent.io/current/streams/developer-guide.html#application-reset-tool). // // The drawback of cleaning up local state prior is that your app must rebuilt its local state from scratch, which // will take time and will require reading all the state-relevant data from the Kafka cluster over the network. // Thus in a production scenario you typically do not want to clean up always as we do here but rather only when it // is truly needed, i.e., only under certain conditions (e.g., the presence of a command line flag for your app). // See `ApplicationResetExample.java` for a production-like example. streams.cleanUp(); streams.start(); // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); }
public static void main(String[] args) { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = createStreams(bootstrapServers, schemaRegistryUrl, "/tmp/kafka-streams-global-tables"); // Always (and unconditionally) clean local state prior to starting the processing topology. // We opt for this unconditional call here because this will make it easier for you to play around with the example // when resetting the application for doing a re-run (via the Application Reset Tool, // http://docs.confluent.io/current/streams/developer-guide.html#application-reset-tool). // // The drawback of cleaning up local state prior is that your app must rebuilt its local state from scratch, which // will take time and will require reading all the state-relevant data from the Kafka cluster over the network. // Thus in a production scenario you typically do not want to clean up always as we do here but rather only when it // is truly needed, i.e., only under certain conditions (e.g., the presence of a command line flag for your app). // See `ApplicationResetExample.java` for a production-like example. streams.cleanUp(); // start processing streams.start(); // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); }
public static void main(final String[] args) throws Exception { final String bootstrapServers = args.length > 0 ? args[0] : "localhost:9092"; final String schemaRegistryUrl = args.length > 1 ? args[1] : "http://localhost:8081"; final KafkaStreams streams = buildWikipediaFeed( bootstrapServers, schemaRegistryUrl, "/tmp/kafka-streams"); // Always (and unconditionally) clean local state prior to starting the processing topology. // We opt for this unconditional call here because this will make it easier for you to play around with the example // when resetting the application for doing a re-run (via the Application Reset Tool, // http://docs.confluent.io/current/streams/developer-guide.html#application-reset-tool). // // The drawback of cleaning up local state prior is that your app must rebuilt its local state from scratch, which // will take time and will require reading all the state-relevant data from the Kafka cluster over the network. // Thus in a production scenario you typically do not want to clean up always as we do here but rather only when it // is truly needed, i.e., only under certain conditions (e.g., the presence of a command line flag for your app). // See `ApplicationResetExample.java` for a production-like example. streams.cleanUp(); streams.start(); // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { @Override public void run() { streams.close(); } })); }
// Stops the managed KafkaStreams instance at most once (guarded by `running`).
// Synchronized so that concurrent stop()/start() calls cannot interleave.
@Override public synchronized void stop() { if (this.running) { try { if (this.kafkaStreams != null) { // Bounded close: waits up to `closeTimeout` seconds for threads to shut down.
this.kafkaStreams.close(this.closeTimeout, TimeUnit.SECONDS); // Optionally wipe local state stores after close, per configuration.
if (this.cleanupConfig.cleanupOnStop()) { this.kafkaStreams.cleanUp(); } this.kafkaStreams = null; } } catch (Exception e) { // Best-effort shutdown: log and continue; `running` is still cleared below.
logger.error("Failed to stop streams", e); } finally { this.running = false; } } }
// Wipe local state stores (demo-only: forces a rebuild from Kafka), then start processing.
streams.cleanUp(); streams.start();
// Clear any previous local state, then launch the topology.
streams.cleanUp(); streams.start();
private KafkaStreams startKStreams(String bootstrapServers) { KafkaStreams streams = new KafkaStreams( createOrdersMaterializedView().build(), config(bootstrapServers)); metadataService = new MetadataService(streams); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); return streams; }
// Delete local state stores before starting (demo pattern; avoid in production).
streams.cleanUp(); streams.start();
// Remove the instance's local state-store directory.
this.kafkaStreams.cleanUp();
public static KafkaStreams run(final boolean doReset, final Properties streamsConfiguration) { // Define the processing topology final StreamsBuilder builder = new StreamsBuilder(); final KStream<String, String> input = builder.stream("my-input-topic"); input.selectKey((key, value) -> value.split(" ")[0]) .groupByKey() .count() .toStream() .to("my-output-topic", Produced.with(Serdes.String(), Serdes.Long())); final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); // Delete the application's local state on reset if (doReset) { streams.cleanUp(); } streams.start(); return streams; }