// Expose the wrapped (inner) store's name; this wrapper adds no naming of its own.
@Override public String name() { return innerState.name(); }
// Delegate naming to the wrapped inner store so callers see a single store identity.
@Override public String name() { return innerState.name(); }
/**
 * Adds the given global state stores to this manager's registry, keyed by store name.
 * A later entry with the same name replaces an earlier one.
 */
void registerGlobalStateStores(final List<StateStore> stateStores) {
    log.debug("Register global stores {}", stateStores);
    stateStores.forEach(store -> globalStores.put(store.name(), store));
}
/**
 * Flushes every registered global store.
 * <p>
 * Unlike the original fail-fast version, a failure in one store no longer prevents
 * the remaining stores from being flushed: the first failure is remembered and
 * rethrown after all stores have been attempted, mirroring the task-level
 * {@code flush()} in this file.
 *
 * @throws ProcessorStateException wrapping the first flush failure, if any
 */
@Override
public void flush() {
    log.debug("Flushing all global globalStores registered in the state manager");
    ProcessorStateException firstException = null;
    for (final StateStore store : this.globalStores.values()) {
        try {
            log.trace("Flushing global store={}", store.name());
            store.flush();
        } catch (final Exception e) {
            // Remember only the first failure; keep flushing the rest so one bad
            // store does not leave the others unflushed.
            if (firstException == null) {
                firstException = new ProcessorStateException(
                    String.format("Failed to flush global state store %s", store.name()), e);
            }
            log.error("Failed to flush global state store {}: ", store.name(), e);
        }
    }
    if (firstException != null) {
        throw firstException;
    }
}
@Override public void flush() { ProcessorStateException firstException = null; // attempting to flush the stores if (!stores.isEmpty()) { log.debug("Flushing all stores registered in the state manager"); for (final StateStore store : stores.values()) { log.trace("Flushing store {}", store.name()); try { store.flush(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%sFailed to flush state store %s", logPrefix, store.name()), e); } log.error("Failed to flush state store {}: ", store.name(), e); } } } if (firstException != null) { throw firstException; } }
// NOTE(review): this chunk appears truncated by extraction — several `if` conditions,
// a `try` block and closing braces are missing (e.g. the null-callback guard has lost
// its condition, and the retry loop around the end-offset lookup is gone). The tokens
// below are preserved as-is; restore the full method from the original file before use.
//
// Registers a single global store: rejects duplicates and unknown store names,
// restores its state from its changelog topic partitions, then records it in
// `globalStores`. The consumer subscription is undone in the `finally` block.
public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback) {
    // Reject double registration of the same global store name.
    if (globalStores.containsKey(store.name())) {
        throw new IllegalArgumentException(String.format("Global Store %s has already been registered", store.name()));
    // Reject stores the topology does not declare as global.
    if (!globalStoreNames.contains(store.name())) {
        throw new IllegalArgumentException(String.format("Trying to register store %s that is not a known global store", store.name()));
    // NOTE(review): the `if (stateRestoreCallback == null)` guard for this throw is missing here.
    throw new IllegalArgumentException(String.format("The stateRestoreCallback provided for store %s was null", store.name()));
    log.info("Restoring state for global store {}", store.name());
    final List<TopicPartition> topicPartitions = topicPartitionsForStore(store);
    Map<TopicPartition, Long> highWatermarks = null;
    // NOTE(review): the retry loop that populates `highWatermarks` via endOffsets() is missing;
    // only its failure path (logging + StreamsException after exhausting `retries`) survived.
    log.error("Failed to get end offsets for topic partitions of global store {} after {} retry attempts. " + "You can increase the number of retries via configuration parameter `retries`.", store.name(), retries, retryableException);
    throw new StreamsException(String.format("Failed to get end offsets for topic partitions of global store %s after %d retry attempts. " + "You can increase the number of retries via configuration parameter `retries`.", store.name(), retries), retryableException);
    // Replay the changelog into the store up to the captured high watermarks, then register it.
    restoreState(stateRestoreCallback, topicPartitions, highWatermarks, store.name());
    globalStores.put(store.name(), store);
    } finally {
        globalConsumer.unsubscribe();
// NOTE(review): truncated fragment — the enclosing method signature and several
// closing braces are missing. Tokens preserved as-is.
//
// Closes every registered store, remembering only the first failure (presumably
// rethrown by the enclosing method after the loop — TODO confirm against the full file).
log.debug("Closing its state manager and all the registered state stores");
for (final StateStore store : stores.values()) {
    log.debug("Closing storage engine {}", store.name());
    try {
        store.close();
    } catch (final Exception e) {
        // Keep closing the remaining stores; only the first exception is kept.
        if (firstException == null) {
            firstException = new ProcessorStateException(String.format("%sFailed to close state store %s", logPrefix, store.name()), e);
        log.error("Failed to close state store {}: ", store.name(), e);
/**
 * Guard used by read/write operations: fails fast when the wrapped store
 * has already been closed.
 *
 * @throws InvalidStateStoreException if the inner store is not open
 */
void validateStoreOpen() {
    if (innerState.isOpen()) {
        return;
    }
    throw new InvalidStateStoreException("Store " + innerState.name() + " is currently closed.");
}
/**
 * Merges the given checkpointable offsets with the offsets recovered via the
 * changelog reader and writes the result to the task's offset checkpoint file.
 * Only stores that are persistent AND have a changelog topic are checkpointed.
 */
@Override public void checkpoint(final Map<TopicPartition, Long> checkpointableOffsets) {
    // Start from whatever the changelog reader has already restored.
    this.checkpointableOffsets.putAll(changelogReader.restoredOffsets());
    for (final StateStore store : stores.values()) {
        final String storeName = store.name();
        // only checkpoint the offset to the offsets file if
        // it is persistent AND changelog enabled
        if (store.persistent() && storeToChangelogTopic.containsKey(storeName)) {
            final String changelogTopic = storeToChangelogTopic.get(storeName);
            final TopicPartition topicPartition = new TopicPartition(changelogTopic, getPartition(storeName));
            if (checkpointableOffsets.containsKey(topicPartition)) {
                // store the last offset + 1 (the log position after restoration)
                this.checkpointableOffsets.put(topicPartition, checkpointableOffsets.get(topicPartition) + 1);
            } else if (standbyRestoredOffsets.containsKey(topicPartition)) {
                // Fall back to the offset restored by the standby task, already positioned correctly.
                this.checkpointableOffsets.put(topicPartition, standbyRestoredOffsets.get(topicPartition));
            }
        }
    }
    // write the checkpoint file before closing
    if (checkpoint == null) {
        checkpoint = new OffsetCheckpoint(new File(baseDir, CHECKPOINT_FILE_NAME));
    }
    log.trace("Writing checkpoint: {}", this.checkpointableOffsets);
    try {
        checkpoint.write(this.checkpointableOffsets);
    } catch (final IOException e) {
        // Checkpointing is best-effort: a failed write only costs a longer restore later.
        log.warn("Failed to write offset checkpoint file to {}: {}", checkpoint, e);
    }
}
// NOTE(review): truncated fragment — the retry loop that fills `partitionInfos` via
// partitionsFor(), the success path building the TopicPartition list, and the closing
// brace are missing. Tokens preserved as-is.
//
// Resolves the changelog topic partitions backing the given global store;
// only the no-partitions failure path survived extraction.
private List<TopicPartition> topicPartitionsForStore(final StateStore store) {
    final String sourceTopic = topology.storeToChangelogTopic().get(store.name());
    List<PartitionInfo> partitionInfos;
    int attempts = 0;
    throw new StreamsException(String.format("There are no partitions available for topic %s when initializing global store %s", sourceTopic, store.name()));
// NOTE(review): truncated fragment — only the method head survived extraction;
// the registration/validation body and closing brace are missing.
//
// Registers a state store with this (task-level) state manager under its own name.
@Override public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback) {
    final String storeName = store.name();
    log.debug("Registering state store {} to its state manager", storeName);
/**
 * Initializes the global state manager: locks the global state directory, reads
 * previously checkpointed offsets, and initializes every global store declared by
 * the topology (each store's {@code init} is expected to call back into
 * {@code register}).
 *
 * @return an unmodifiable set of the known global store names
 * @throws LockException    if the global state directory cannot be locked
 * @throws StreamsException if the checkpoint file cannot be read
 */
@Override
public Set<String> initialize() {
    try {
        if (!stateDirectory.lockGlobalState()) {
            throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir));
        }
    } catch (final IOException e) {
        // Preserve the underlying IOException as the cause (was previously dropped).
        throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir), e);
    }
    try {
        this.checkpointableOffsets.putAll(checkpoint.read());
    } catch (final IOException e) {
        // Best-effort cleanup: release the lock before propagating the read failure.
        try {
            stateDirectory.unlockGlobalState();
        } catch (final IOException e1) {
            // Log the unlock failure itself (was previously logging the outer read
            // exception `e`, hiding why the unlock failed).
            log.error("Failed to unlock the global state directory", e1);
        }
        throw new StreamsException("Failed to read checkpoints for global state globalStores", e);
    }
    final List<StateStore> stateStores = topology.globalStateStores();
    for (final StateStore stateStore : stateStores) {
        globalStoreNames.add(stateStore.name());
        // init() is expected to invoke register() on this manager.
        stateStore.init(processorContext, stateStore);
    }
    return Collections.unmodifiableSet(globalStoreNames);
}
/**
 * Initializes this store: derives its changelog topic from the application id and
 * the root store's name, performs internal setup, then initializes the wrapped
 * bytes store.
 */
public void init(final ProcessorContext context, final StateStore root) {
    // Changelog topic name is a function of application id + store name.
    topic = ProcessorStateManager.storeChangelogTopic(context.applicationId(), root.name());
    final InternalProcessorContext internalContext = (InternalProcessorContext) context;
    initInternal(internalContext);
    bytesStore.init(context, root);
}
// NOTE(review): truncated fragment — the enclosing loop/method, the `try` body and
// all closing braces are missing. Tokens preserved as-is.
//
// Presumably: for each store, if it was marked for re-initialization, wipe/rebuild it
// inside the try block — TODO confirm against the full file.
final String storeName = stateStore.name();
if (storeToBeReinitialized.contains(storeName)) {
    try {
/** * Package-private for testing only * * @throws StreamsException If the store's change log does not contain the partition */ void registerStateStores() { if (topology.stateStores().isEmpty()) { return; } try { if (!stateDirectory.lock(id)) { throw new LockException(String.format("%sFailed to lock the state directory for task %s", logPrefix, id)); } } catch (final IOException e) { throw new StreamsException( String.format("%sFatal error while trying to lock the state directory for task %s", logPrefix, id)); } log.trace("Initializing state stores"); // set initial offset limits updateOffsetLimits(); for (final StateStore store : topology.stateStores()) { log.trace("Initializing store {}", store.name()); processorContext.uninitialize(); store.init(processorContext, store); } }
/**
 * Creates the global state manager.
 * <p>
 * Records the changelog topics of all non-persistent global stores (their topics
 * get special handling elsewhere — presumably skipped on checkpoint restore,
 * TODO confirm), and captures retry/backoff/poll settings from the streams config.
 * EOS mode is derived from the processing-guarantee config and passed to the base class.
 */
public GlobalStateManagerImpl(final LogContext logContext, final ProcessorTopology topology, final Consumer<byte[], byte[]> globalConsumer, final StateDirectory stateDirectory, final StateRestoreListener stateRestoreListener, final StreamsConfig config) {
    super(stateDirectory.globalStateDir(), StreamsConfig.EXACTLY_ONCE.equals(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)));
    // Find non persistent store's topics
    final Map<String, String> storeToChangelogTopic = topology.storeToChangelogTopic();
    for (final StateStore store : topology.globalStateStores()) {
        if (!store.persistent()) {
            globalNonPersistentStoresTopics.add(storeToChangelogTopic.get(store.name()));
        }
    }
    this.log = logContext.logger(GlobalStateManagerImpl.class);
    this.topology = topology;
    this.globalConsumer = globalConsumer;
    this.stateDirectory = stateDirectory;
    this.stateRestoreListener = stateRestoreListener;
    this.retries = config.getInt(StreamsConfig.RETRIES_CONFIG);
    this.retryBackoffMs = config.getLong(StreamsConfig.RETRY_BACKOFF_MS_CONFIG);
    this.pollTime = Duration.ofMillis(config.getLong(StreamsConfig.POLL_MS_CONFIG));
}
/**
 * Initializes this segmented store: registers the expired-record metrics sensor,
 * initializes the key schema with the derived changelog topic, opens existing
 * segments, and registers the restore callback before marking the store open.
 */
@Override public void init(final ProcessorContext context, final StateStore root) {
    this.context = (InternalProcessorContext) context;
    final StreamsMetricsImpl metrics = this.context.metrics();
    final String taskName = context.taskId().toString();
    // Sensor counting window records dropped because they arrived after expiry.
    expiredRecordSensor = metrics.storeLevelSensor( taskName, name(), "expired-window-record-drop", Sensor.RecordingLevel.INFO );
    addInvocationRateAndCount( expiredRecordSensor, "stream-" + metricScope + "-metrics", metrics.tagMap("task-id", taskName, metricScope + "-id", name()), "expired-window-record-drop" );
    // Changelog topic is a function of application id + store (root) name.
    keySchema.init(ProcessorStateManager.storeChangelogTopic(context.applicationId(), root.name()));
    segments.openExisting(this.context);
    // Pre-existing segments are eligible for bulk-load during restoration.
    bulkLoadSegments = new HashSet<>(segments.allSegments());
    // register and possibly restore the state from the logs
    context.register(root, new RocksDBSegmentsBatchingRestoreCallback());
    open = true;
}