/**
 * Tags the given span with the Kafka Streams application id and the id of the
 * task the processor context belongs to.
 */
static void addTags(ProcessorContext processorContext, SpanCustomizer result) {
    final String applicationId = processorContext.applicationId();
    final String taskId = processorContext.taskId().toString();
    result.tag(KafkaStreamsTags.KAFKA_STREAMS_APPLICATION_ID_TAG, applicationId);
    result.tag(KafkaStreamsTags.KAFKA_STREAMS_TASK_ID_TAG, taskId);
}
/**
 * Creates the processor context used for global state stores.
 *
 * <p>Delegates to the superclass with the sentinel {@code TaskId(-1, -1)},
 * which marks this context as belonging to the global task rather than any
 * regular (topicGroupId, partition) task.
 */
public GlobalProcessorContextImpl(final StreamsConfig config, final StateManager stateMgr, final StreamsMetricsImpl metrics, final ThreadCache cache) { super(new TaskId(-1, -1), config, metrics, stateMgr, cache); }
/**
 * Decodes the active-task assignment from the stream: a count followed by
 * that many serialized {@link TaskId}s, stored into the given
 * {@code AssignmentInfo}.
 *
 * @throws IOException if the stream cannot be read
 */
private static void decodeActiveTasks(final AssignmentInfo assignmentInfo, final DataInputStream in) throws IOException {
    final int numActiveTasks = in.readInt();
    assignmentInfo.activeTasks = new ArrayList<>(numActiveTasks);
    int remaining = numActiveTasks;
    while (remaining-- > 0) {
        assignmentInfo.activeTasks.add(TaskId.readFrom(in));
    }
}
/**
 * Writes the task ids to the buffer as a count followed by each serialized id.
 */
protected void encodeTasks(final ByteBuffer buf, final Collection<TaskId> taskIds) {
    buf.putInt(taskIds.size());
    for (final TaskId taskId : taskIds) {
        taskId.writeTo(buf);
    }
}
/** * Returns ids of tasks whose states are kept on the local storage. */ public Set<TaskId> cachedTasksIds() { // A client could contain some inactive tasks whose states are still kept on the local storage in the following scenarios: // 1) the client is actively maintaining standby tasks by maintaining their states from the change log. // 2) the client has just got some tasks migrated out of itself to other clients while these task states // have not been cleaned up yet (this can happen in a rolling bounce upgrade, for example). final HashSet<TaskId> tasks = new HashSet<>(); final File[] stateDirs = taskCreator.stateDirectory().listTaskDirectories(); if (stateDirs != null) { for (final File dir : stateDirs) { try { final TaskId id = TaskId.parse(dir.getName()); // if the checkpoint file exists, the state is valid. if (new File(dir, ProcessorStateManager.CHECKPOINT_FILE_NAME).exists()) { tasks.add(id); } } catch (final TaskIdFormatException e) { // there may be some unknown files that sits in the same directory, // we should ignore these files instead trying to delete them as well } } } return tasks; }
/**
 * Returns a Pair holding the two task ids in ascending order, so that the
 * same unordered pair of tasks always maps to the same Pair.
 */
Pair pair(final TaskId task1, final TaskId task2) {
    final boolean firstIsSmaller = task1.compareTo(task2) < 0;
    return firstIsSmaller ? new Pair(task1, task2) : new Pair(task2, task1);
}
/**
 * Deserializes a {@link TaskId} from the buffer: topic group id first, then
 * partition, each as a 4-byte int (named locals make the read order explicit).
 */
public static TaskId readFrom(final ByteBuffer buf) {
    final int topicGroupId = buf.getInt();
    final int partition = buf.getInt();
    return new TaskId(topicGroupId, partition);
}
/**
 * Decodes the subscription's previously-owned active tasks and its standby
 * tasks from the buffer. Each list is encoded as a count followed by that
 * many serialized {@link TaskId}s.
 */
private static void decodeTasks(final SubscriptionInfo subscriptionInfo, final ByteBuffer data) {
    // Previously-owned active tasks.
    final HashSet<TaskId> prevTasks = new HashSet<>();
    int prevCount = data.getInt();
    while (prevCount-- > 0) {
        prevTasks.add(TaskId.readFrom(data));
    }
    subscriptionInfo.prevTasks = prevTasks;

    // Standby tasks use the same wire layout.
    final HashSet<TaskId> standbyTasks = new HashSet<>();
    int standbyCount = data.getInt();
    while (standbyCount-- > 0) {
        standbyTasks.add(TaskId.readFrom(data));
    }
    subscriptionInfo.standbyTasks = standbyTasks;
}
private void encodeActiveAndStandbyTaskAssignment(final DataOutputStream out) throws IOException { // encode active tasks out.writeInt(activeTasks.size()); for (final TaskId id : activeTasks) { id.writeTo(out); } // encode standby tasks out.writeInt(standbyTasks.size()); for (final Map.Entry<TaskId, Set<TopicPartition>> entry : standbyTasks.entrySet()) { final TaskId id = entry.getKey(); id.writeTo(out); final Set<TopicPartition> partitions = entry.getValue(); writeTopicPartitions(out, partitions); } }
// Parse the task id from the directory name; TaskId.parse throws
// TaskIdFormatException for names that are not task directories — presumably
// handled by an enclosing catch that is not visible in this fragment.
final TaskId id = TaskId.parse(dirName);
// Only proceed for tasks we do not already hold a lock for.
if (!locks.containsKey(id)) {
    try {
/**
 * Get or create the directory for the provided {@link TaskId}.
 *
 * @return directory for the {@link TaskId}
 * @throws ProcessorStateException if the task directory does not exist and could not be created
 */
public File directoryForTask(final TaskId taskId) {
    final File taskDir = new File(stateDir, taskId.toString());
    // mkdir() returns false both on real failure and when the directory
    // already exists, so re-check existence afterwards to tolerate a
    // concurrent creation by another thread/process between the initial
    // exists() check and the mkdir() call (TOCTOU).
    if (!taskDir.exists() && !taskDir.mkdir() && !taskDir.exists()) {
        throw new ProcessorStateException(
            String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath()));
    }
    return taskDir;
}
/**
 * Deserializes a {@link TaskId} from the stream: topic group id first, then
 * partition, each as a 4-byte int.
 *
 * @throws IOException if cannot read from input stream
 */
public static TaskId readFrom(final DataInputStream in) throws IOException {
    final int topicGroupId = in.readInt();
    final int partition = in.readInt();
    return new TaskId(topicGroupId, partition);
}
/**
 * Decodes the standby-task assignment from the stream: a count followed by
 * that many (task id, topic partitions) pairs, stored into the given
 * {@code AssignmentInfo}.
 *
 * @throws IOException if the stream cannot be read
 */
private static void decodeStandbyTasks(final AssignmentInfo assignmentInfo, final DataInputStream in) throws IOException {
    final int numStandbyTasks = in.readInt();
    assignmentInfo.standbyTasks = new HashMap<>(numStandbyTasks);
    int remaining = numStandbyTasks;
    while (remaining-- > 0) {
        // The task id precedes its partition set on the wire.
        final TaskId taskId = TaskId.readFrom(in);
        assignmentInfo.standbyTasks.put(taskId, readTopicPartitions(in));
    }
}
/**
 * Rebuilds this thread's {@code ThreadMetadata} snapshot from the current
 * active and standby task maps: one {@code TaskMetadata} (task id string plus
 * assigned partitions) per task, tagged with the thread's name and state.
 */
private void updateThreadMetadata(final Map<TaskId, StreamTask> activeTasks, final Map<TaskId, StandbyTask> standbyTasks) {
    final Set<TaskMetadata> activeTasksMetadata = new HashSet<>();
    for (final Map.Entry<TaskId, StreamTask> activeEntry : activeTasks.entrySet()) {
        final TaskMetadata metadata = new TaskMetadata(activeEntry.getKey().toString(), activeEntry.getValue().partitions());
        activeTasksMetadata.add(metadata);
    }
    final Set<TaskMetadata> standbyTasksMetadata = new HashSet<>();
    for (final Map.Entry<TaskId, StandbyTask> standbyEntry : standbyTasks.entrySet()) {
        final TaskMetadata metadata = new TaskMetadata(standbyEntry.getKey().toString(), standbyEntry.getValue().partitions());
        standbyTasksMetadata.add(metadata);
    }
    threadMetadata = new ThreadMetadata(this.getName(), this.state().name(), activeTasksMetadata, standbyTasksMetadata);
}
/**
 * Parses a task id from its {@code "<topicGroupId>_<partition>"} string form
 * (the inverse of {@code TaskId.toString()}).
 *
 * @throws TaskIdFormatException if the taskIdStr is not a valid {@link TaskId}
 */
public static TaskId parse(final String taskIdStr) {
    final int index = taskIdStr.indexOf('_');
    // The separator must be present, must not be the first character
    // (empty group id), and must not be the last (empty partition).
    if (index <= 0 || index + 1 >= taskIdStr.length()) {
        throw new TaskIdFormatException(taskIdStr);
    }
    try {
        final int topicGroupId = Integer.parseInt(taskIdStr.substring(0, index));
        final int partition = Integer.parseInt(taskIdStr.substring(index + 1));
        return new TaskId(topicGroupId, partition);
    } catch (final NumberFormatException e) {
        // Only Integer.parseInt can fail here given the bounds checks above;
        // catching Exception broadly would mask genuine programming errors.
        throw new TaskIdFormatException(taskIdStr);
    }
}
// Task id rendered once as a string; used as the task-level name scope.
final String taskName = context.taskId().toString();
// Tags identifying this specific processor node within the task.
final Map<String, String> tagMap = metrics.tagMap("task-id", context.taskId().toString(), "processor-node-id", processorNodeName);
// Parallel tag set with the node id replaced by the literal "all" —
// presumably the rollup scope aggregating across every node of the task;
// TODO(review): confirm against StreamsMetricsImpl conventions.
final Map<String, String> allTagMap = metrics.tagMap("task-id", context.taskId().toString(), "processor-node-id", "all");
/**
 * Generate tasks with the assigned topic partitions.
 *
 * @param topicGroups group of topics that need to be joined together
 * @param metadata    metadata of the consuming cluster
 * @return The map from generated task ids to the assigned partitions
 */
public Map<TaskId, Set<TopicPartition>> partitionGroups(final Map<Integer, Set<String>> topicGroups, final Cluster metadata) {
    final Map<TaskId, Set<TopicPartition>> groups = new HashMap<>();
    for (final Map.Entry<Integer, Set<String>> topicGroupEntry : topicGroups.entrySet()) {
        final Integer topicGroupId = topicGroupEntry.getKey();
        final Set<String> topics = topicGroupEntry.getValue();
        // One task is generated per partition id, up to the widest topic in the group.
        final int maxNumPartitions = maxNumPartitions(metadata, topics);
        for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) {
            final Set<TopicPartition> group = new HashSet<>(topics.size());
            for (final String topic : topics) {
                // Topics with fewer partitions simply contribute nothing at this id.
                if (partitionId < metadata.partitionsForTopic(topic).size()) {
                    group.add(new TopicPartition(topic, partitionId));
                }
            }
            groups.put(new TaskId(topicGroupId, partitionId), Collections.unmodifiableSet(group));
        }
    }
    return Collections.unmodifiableMap(groups);
}
/**
 * Creates (or retrieves) the node-level "late-record-drop" sensor for the
 * current processor node, registered with invocation rate and count metrics.
 *
 * @return the sensor used to record late-record drops
 */
public static Sensor lateRecordDropSensor(final InternalProcessorContext context) {
    final StreamsMetricsImpl metrics = context.metrics();
    // Hoist the repeated task-id and node-name lookups into locals so the
    // sensor registration and its tag map are guaranteed to agree.
    final String taskName = context.taskId().toString();
    final String processorNodeName = context.currentNode().name();
    final Sensor sensor = metrics.nodeLevelSensor(
        taskName,
        processorNodeName,
        "late-record-drop",
        Sensor.RecordingLevel.INFO
    );
    StreamsMetricsImpl.addInvocationRateAndCount(
        sensor,
        "stream-processor-node-metrics",
        metrics.tagMap("task-id", taskName, "processor-node-id", processorNodeName),
        "late-record-drop"
    );
    return sensor;
}
// Wires this store into the processor context: builds the serdes for the
// store's changelog topic, grabs the thread cache, and registers a flush
// listener so dirty cache entries are written through (and possibly
// forwarded downstream) on eviction/flush.
@SuppressWarnings("unchecked")
private void initInternal(final ProcessorContext context) {
    this.context = (InternalProcessorContext) context;
    // Fall back to the context's default key/value serdes when none were
    // supplied explicitly; the serdes are bound to the changelog topic name.
    this.serdes = new StateSerdes<>(
        ProcessorStateManager.storeChangelogTopic(context.applicationId(), underlying.name()),
        keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
        valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
    this.cache = this.context.getCache();
    // Cache namespace combines task id and store name so entries from
    // different tasks/stores never collide in the shared thread cache.
    this.cacheName = ThreadCache.nameSpaceFromTaskIdAndStore(context.taskId().toString(), underlying.name());
    cache.addDirtyEntryFlushListener(cacheName, new ThreadCache.DirtyEntryFlushListener() {
        @Override
        public void apply(final List<ThreadCache.DirtyEntry> entries) {
            // Each dirty entry is pushed to the underlying store and, where
            // applicable, forwarded to downstream processors.
            for (final ThreadCache.DirtyEntry entry : entries) {
                putAndMaybeForward(entry, (InternalProcessorContext) context);
            }
        }
    });
}