private KafkaStreams startKStreams(String bootstrapServers) { KafkaStreams streams = new KafkaStreams( createOrdersMaterializedView().build(), config(bootstrapServers)); metadataService = new MetadataService(streams); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); return streams; }
// EasyMock recording phase: declare the lifecycle calls the code under test is
// expected to make on the KafkaStreams mock. Each call is void, hence the
// expectLastCall() that follows every invocation.
mockKafkaStreams.start();
expectLastCall();
// NOTE(review): anyObject(...) matches any handler instance — presumably the
// code under test installs its own uncaught-exception handler; confirm.
mockKafkaStreams.setUncaughtExceptionHandler(anyObject(Thread.UncaughtExceptionHandler.class));
expectLastCall();
mockKafkaStreams.cleanUp();
expectLastCall();
mockKafkaStreams.close();
expectLastCall();
/**
 * Returns a read-only, interactive-query view of the orders state store.
 *
 * @return the queryable key-value store of orders keyed by order id
 */
private ReadOnlyKeyValueStore<String, Order> ordersStore() {
    return streams.store(
        ORDERS_STORE_NAME,
        QueryableStoreTypes.<String, Order>keyValueStore());
}
// Build the streams client from the assembled topology/config and start it.
KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();
// (fragment) Tail of an integration test: route the transformed stream to the
// output topic, run the topology, and compare consumed records to expectations.
uppercased.to(outputTopic);
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
// Blocks until at least expectedValues.size() records arrive on the output topic.
List<String> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(
    consumerConfig, outputTopic, expectedValues.size());
// NOTE(review): close() is skipped if the wait above throws/times out —
// consider try/finally so the streams instance is not leaked on failure.
streams.close();
assertThat(actualValues).isEqualTo(expectedValues);
/**
 * Creates a {@link KafkaStreams} instance whose clients come from a
 * tracing-enabled {@link KafkaClientSupplier}, so every Topology source and
 * sink (internal topics included) emits a span per record sent or consumed.
 *
 * <p>Use this in place of the plain {@link KafkaStreams} constructor:
 * <pre>{@code
 * // KafkaStreams with tracing-enabled KafkaClientSupplier
 * KafkaStreams kafkaStreams = kafkaStreamsTracing.kafkaStreams(topology, streamsConfig);
 * }</pre>
 *
 * @param topology      the processing topology to run
 * @param streamsConfig the Streams configuration properties
 * @return a KafkaStreams instance wired with the tracing client supplier
 * @see TracingKafkaClientSupplier
 */
public KafkaStreams kafkaStreams(Topology topology, Properties streamsConfig) {
    final KafkaClientSupplier tracingSupplier =
        new TracingKafkaClientSupplier(KafkaTracing.create(tracing));
    return new KafkaStreams(topology, streamsConfig, tracingSupplier);
}
// NOTE(review): this.kafkaStreams is assigned twice in a row — the instance
// built from this.properties is constructed and immediately discarded. Likely
// leftover from a refactor toward this.streamsConfig; confirm which config
// source is intended and delete the dead construction.
this.kafkaStreams = new KafkaStreams(topology, this.properties, this.clientSupplier);
this.kafkaStreams = new KafkaStreams(topology, this.streamsConfig, this.clientSupplier);
// Wire lifecycle callbacks before start() so no transition is missed.
this.kafkaStreams.setStateListener(this.stateListener);
this.kafkaStreams.setGlobalStateRestoreListener(this.stateRestoreListener);
this.kafkaStreams.setUncaughtExceptionHandler(this.uncaughtExceptionHandler);
// Optional hook letting users customize the instance before startup.
if (this.kafkaStreamsCustomizer != null) {
    this.kafkaStreamsCustomizer.customize(this.kafkaStreams);
// NOTE(review): braces are unbalanced in this fragment — the if-block's
// closing brace lies outside the visible span.
this.kafkaStreams.cleanUp();
this.kafkaStreams.start();
this.running = true;
// (fragment) Integration-test tail verifying a join's state-store contents.
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
// NOTE(review): the store handle returned here is discarded; the next line
// reads `readOnlyKeyValueStore`, which is not declared in this fragment.
// Presumably the result of store(...) was meant to be assigned to it — confirm.
streams.store(storeName, QueryableStoreTypes.keyValueStore());
KeyValueIterator<String, String> keyValueIterator = readOnlyKeyValueStore.all();
assertThat(keyValueIterator).containsExactlyElementsOf(expectedResultsForJoinStateStore);
// NOTE(review): close() is skipped if an assertion above throws — consider try/finally.
streams.close();
assertThat(actualResults).containsExactlyElementsOf(expectedResults);
// (fragment) Tail of a Materialized configuration applied to an aggregation
// defined above this span.
serdesPair.applyTo(Materialized.as(storeSupplier)));
KafkaStreams kafkaStreams = new KafkaStreams(streamsBuilder.build(), props);
kafkaStreams.start();
// NOTE(review): querying a store immediately after start() can fail while the
// instance is still rebalancing — presumably callers retry elsewhere; confirm.
ReadOnlyKeyValueStore<Integer, String> store = kafkaStreams.store(
    name + "_balance", QueryableStoreTypes.<Integer, String>keyValueStore()
);
// (fragment) Trailing argument of a call that begins above this span.
50);
// Start both application instances so state is distributed across them.
streamInstanceOne.start();
streamInstanceTwo.start();
// Each instance must expose every customer and product through its local
// queryable store — verified on both instances for both stores.
verifyAllCustomersInStore(customers,
    streamInstanceOne.store(CUSTOMER_STORE, QueryableStoreTypes.keyValueStore()));
verifyAllCustomersInStore(customers,
    streamInstanceTwo.store(CUSTOMER_STORE, QueryableStoreTypes.keyValueStore()));
verifyAllProductsInStore(products,
    streamInstanceOne.store(PRODUCT_STORE, QueryableStoreTypes.keyValueStore()));
verifyAllProductsInStore(products,
    streamInstanceTwo.store(PRODUCT_STORE, QueryableStoreTypes.keyValueStore()));
/**
 * JUnit fixture: builds the session-windows example topology against the
 * embedded cluster and starts it before each test.
 */
@Before
public void createStreams() {
    final String stateDir = TestUtils.tempDirectory().getPath();
    streams = SessionWindowsExample.createStreams(
        CLUSTER.bootstrapServers(),
        CLUSTER.schemaRegistryUrl(),
        stateDir);
    streams.start();
}
// (fragment) Bring up a streams instance bound to localhost, then wait until
// interactive-query metadata for the top-five-songs store becomes available.
final String host = "localhost";
createStreams(host);
streams.start();
TestUtils.waitForCondition(
    () -> !StreamsMetadata.NOT_AVAILABLE.equals(
        streams.allMetadataForStore(KafkaMusicExample.TOP_FIVE_SONGS_STORE)),
    MAX_WAIT_MS,
    "StreamsMetadata should be available");
try {
    // Store lookups can still fail during rebalancing; the catch below (cut
    // off at the end of this fragment) presumably absorbs that — confirm.
    songsStore = streams.store(KafkaMusicExample.ALL_SONGS,
        QueryableStoreTypes.<Long, Song>keyValueStore());
    return songsStore.all().hasNext();
} catch (Exception e) {
// (fragment) Tail of an anonymous Runnable — presumably registered as a JVM
// shutdown hook (Runtime.getRuntime().addShutdownHook(...) would sit above
// this span) so the streams instance closes cleanly on exit; confirm.
@Override
public void run() {
    streams.close();
}
}));
/**
 * Starting the REST proxy with an unresolvable host name must fail. The broad
 * expected type mirrors whatever the proxy bootstrap throws on bind failure.
 */
@Test(expected = Exception.class)
public void shouldThrowExceptionForInvalidHost() throws Exception {
    final int port = randomFreeLocalPort();
    final String invalidHost = "someInvalidHost";
    kafkaStreams = WordCountInteractiveQueriesExample.createStreams(
        createStreamConfig(CLUSTER.bootstrapServers(), port, "one", invalidHost));
    // Latch released once the instance transitions REBALANCING -> RUNNING.
    final CountDownLatch runningLatch = new CountDownLatch(1);
    kafkaStreams.setStateListener((newState, oldState) -> {
        final boolean becameRunning =
            newState == KafkaStreams.State.RUNNING
                && oldState == KafkaStreams.State.REBALANCING;
        if (becameRunning) {
            runningLatch.countDown();
        }
    });
    kafkaStreams.start();
    // Binding the proxy to the invalid host is what should throw.
    proxy = WordCountInteractiveQueriesExample.startRestProxy(kafkaStreams, port, invalidHost);
}
/**
 * Stops the managed KafkaStreams instance if this component is running:
 * closes it within the configured timeout, optionally wipes local state
 * (per the cleanup config), and releases the reference. Always clears the
 * running flag, even if shutdown fails.
 */
@Override
public synchronized void stop() {
    if (!this.running) {
        return;
    }
    try {
        if (this.kafkaStreams != null) {
            this.kafkaStreams.close(this.closeTimeout, TimeUnit.SECONDS);
            // Wipe local state stores only when configured to do so on stop.
            if (this.cleanupConfig.cleanupOnStop()) {
                this.kafkaStreams.cleanUp();
            }
            this.kafkaStreams = null;
        }
    } catch (Exception e) {
        logger.error("Failed to stop streams", e);
    } finally {
        this.running = false;
    }
}
/**
 * Deletes the local state directory for this streams application, if a
 * streams instance has been created; no-op otherwise.
 */
void cleanLocalState() {
    if (streams == null) {
        return;
    }
    streams.cleanUp();
}
// (fragment) Integration-test tail: run the topology and verify the output
// topic echoes the input values unchanged.
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
// Blocks until at least inputValues.size() records arrive on the output topic.
List<String> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(
    consumerConfig, outputTopic, inputValues.size());
// NOTE(review): close() is skipped if the wait above throws/times out —
// consider try/finally so the streams instance is not leaked on failure.
streams.close();
assertThat(actualValues).isEqualTo(inputValues);
@Override public void start(final String bootstrapServers, final String stateDir) { streams = aggregateOrderValidations(bootstrapServers, stateDir); streams.cleanUp(); //don't do this in prod as it clears your state stores streams.start(); log.info("Started Service " + getClass().getSimpleName()); }