Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

unwind_user/deferred: Add unwind cache

Cache the results of the unwind to ensure the unwind is only performed
once, even when called by multiple tracers.

The cache nr_entries gets cleared every time the task exits the kernel.
When a stacktrace is requested, nr_entries gets set to the number of
entries in the stacktrace. If another stacktrace is requested and
nr_entries is not zero, then the cache already contains the same
stacktrace that would be retrieved, so the unwind is not performed again
and the cached entries are given to the caller.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Indu Bhagat <indu.bhagat@oracle.com>
Cc: "Jose E. Marchesi" <jemarch@gnu.org>
Cc: Beau Belgrave <beaub@linux.microsoft.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Sam James <sam@gentoo.org>
Link: https://lore.kernel.org/20250729182405.319691167@kernel.org
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Reviewed-by: Indu Bhagat <indu.bhagat@oracle.com>
Co-developed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Josh Poimboeuf and committed by
Steven Rostedt (Google)
b9c73524 5e32d0f1

+40 -8
+2
include/linux/entry-common.h
··· 12 12 #include <linux/resume_user_mode.h> 13 13 #include <linux/tick.h> 14 14 #include <linux/kmsan.h> 15 + #include <linux/unwind_deferred.h> 15 16 16 17 #include <asm/entry-common.h> 17 18 #include <asm/syscall.h> ··· 363 362 lockdep_hardirqs_on_prepare(); 364 363 instrumentation_end(); 365 364 365 + unwind_reset_info(); 366 366 user_enter_irqoff(); 367 367 arch_exit_to_user_mode(); 368 368 lockdep_hardirqs_on(CALLER_ADDR0);
+8
include/linux/unwind_deferred.h
··· 12 12 13 13 int unwind_user_faultable(struct unwind_stacktrace *trace); 14 14 15 + static __always_inline void unwind_reset_info(void) 16 + { 17 + if (unlikely(current->unwind_info.cache)) 18 + current->unwind_info.cache->nr_entries = 0; 19 + } 20 + 15 21 #else /* !CONFIG_UNWIND_USER */ 16 22 17 23 static inline void unwind_task_init(struct task_struct *task) {} 18 24 static inline void unwind_task_free(struct task_struct *task) {} 19 25 20 26 static inline int unwind_user_faultable(struct unwind_stacktrace *trace) { return -ENOSYS; } 27 + 28 + static inline void unwind_reset_info(void) {} 21 29 22 30 #endif /* !CONFIG_UNWIND_USER */ 23 31
+6 -1
include/linux/unwind_deferred_types.h
··· 2 2 #ifndef _LINUX_UNWIND_USER_DEFERRED_TYPES_H 3 3 #define _LINUX_UNWIND_USER_DEFERRED_TYPES_H 4 4 5 + struct unwind_cache { 6 + unsigned int nr_entries; 7 + unsigned long entries[]; 8 + }; 9 + 5 10 struct unwind_task_info { 6 - unsigned long *entries; 11 + struct unwind_cache *cache; 7 12 }; 8 13 9 14 #endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
+24 -7
kernel/unwind/deferred.c
··· 4 4 */ 5 5 #include <linux/kernel.h> 6 6 #include <linux/sched.h> 7 + #include <linux/sizes.h> 7 8 #include <linux/slab.h> 8 9 #include <linux/unwind_deferred.h> 9 10 10 - #define UNWIND_MAX_ENTRIES 512 11 + /* Make the cache fit in a 4K page */ 12 + #define UNWIND_MAX_ENTRIES \ 13 + ((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long)) 11 14 12 15 /** 13 16 * unwind_user_faultable - Produce a user stacktrace in faultable context ··· 27 24 int unwind_user_faultable(struct unwind_stacktrace *trace) 28 25 { 29 26 struct unwind_task_info *info = &current->unwind_info; 27 + struct unwind_cache *cache; 30 28 31 29 /* Should always be called from faultable context */ 32 30 might_fault(); ··· 35 31 if (current->flags & PF_EXITING) 36 32 return -EINVAL; 37 33 38 - if (!info->entries) { 39 - info->entries = kmalloc_array(UNWIND_MAX_ENTRIES, sizeof(long), 40 - GFP_KERNEL); 41 - if (!info->entries) 34 + if (!info->cache) { 35 + info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES), 36 + GFP_KERNEL); 37 + if (!info->cache) 42 38 return -ENOMEM; 43 39 } 44 40 41 + cache = info->cache; 42 + trace->entries = cache->entries; 43 + 44 + if (cache->nr_entries) { 45 + /* 46 + * The user stack has already been previously unwound in this 47 + * entry context. Skip the unwind and use the cache. 48 + */ 49 + trace->nr = cache->nr_entries; 50 + return 0; 51 + } 52 + 45 53 trace->nr = 0; 46 - trace->entries = info->entries; 47 54 unwind_user(trace, UNWIND_MAX_ENTRIES); 55 + 56 + cache->nr_entries = trace->nr; 48 57 49 58 return 0; 50 59 } ··· 73 56 { 74 57 struct unwind_task_info *info = &task->unwind_info; 75 58 76 - kfree(info->entries); 59 + kfree(info->cache); 77 60 }