Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

stackdepot: Enable context analysis

Enable context analysis for stackdepot.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-32-elver@google.com

authored by

Marco Elver and committed by
Peter Zijlstra
c3d3023f 0eaa911f

+15 -6
+1
lib/Makefile
··· 250 250 # Prevent the compiler from calling builtins like memcmp() or bcmp() from this 251 251 # file. 252 252 CFLAGS_stackdepot.o += -fno-builtin 253 + CONTEXT_ANALYSIS_stackdepot.o := y 253 254 obj-$(CONFIG_STACKDEPOT) += stackdepot.o 254 255 KASAN_SANITIZE_stackdepot.o := n 255 256 # In particular, instrumenting stackdepot.c with KMSAN will result in infinite
+14 -6
lib/stackdepot.c
··· 61 61 /* Hash mask for indexing the table. */ 62 62 static unsigned int stack_hash_mask; 63 63 64 + /* The lock must be held when performing pool or freelist modifications. */ 65 + static DEFINE_RAW_SPINLOCK(pool_lock); 64 66 /* Array of memory regions that store stack records. */ 65 - static void **stack_pools; 67 + static void **stack_pools __pt_guarded_by(&pool_lock); 66 68 /* Newly allocated pool that is not yet added to stack_pools. */ 67 69 static void *new_pool; 68 70 /* Number of pools in stack_pools. */ 69 71 static int pools_num; 70 72 /* Offset to the unused space in the currently used pool. */ 71 - static size_t pool_offset = DEPOT_POOL_SIZE; 73 + static size_t pool_offset __guarded_by(&pool_lock) = DEPOT_POOL_SIZE; 72 74 /* Freelist of stack records within stack_pools. */ 73 - static LIST_HEAD(free_stacks); 74 - /* The lock must be held when performing pool or freelist modifications. */ 75 - static DEFINE_RAW_SPINLOCK(pool_lock); 75 + static __guarded_by(&pool_lock) LIST_HEAD(free_stacks); 76 76 77 77 /* Statistics counters for debugfs. */ 78 78 enum depot_counter_id { ··· 291 291 * Initializes new stack pool, and updates the list of pools. 292 292 */ 293 293 static bool depot_init_pool(void **prealloc) 294 + __must_hold(&pool_lock) 294 295 { 295 296 lockdep_assert_held(&pool_lock); 296 297 ··· 339 338 340 339 /* Keeps the preallocated memory to be used for a new stack depot pool. */ 341 340 static void depot_keep_new_pool(void **prealloc) 341 + __must_hold(&pool_lock) 342 342 { 343 343 lockdep_assert_held(&pool_lock); 344 344 ··· 359 357 * the current pre-allocation. 360 358 */ 361 359 static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) 360 + __must_hold(&pool_lock) 362 361 { 363 362 struct stack_record *stack; 364 363 void *current_pool; ··· 394 391 395 392 /* Try to find next free usable entry from the freelist. */
396 393 static struct stack_record *depot_pop_free(void) 394 + __must_hold(&pool_lock) 397 395 { 398 396 struct stack_record *stack; 399 397 ··· 432 428 /* Allocates a new stack in a stack depot pool. */ 433 429 static struct stack_record * 434 430 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) 431 + __must_hold(&pool_lock) 435 432 { 436 433 struct stack_record *stack = NULL; 437 434 size_t record_size; ··· 491 486 } 492 487 493 488 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) 489 + __must_not_hold(&pool_lock) 494 490 { 495 491 const int pools_num_cached = READ_ONCE(pools_num); 496 492 union handle_parts parts = { .handle = handle }; ··· 508 502 return NULL; 509 503 } 510 504 511 - pool = stack_pools[pool_index]; 505 + /* @pool_index either valid, or user passed in corrupted value. */ 506 + pool = context_unsafe(stack_pools[pool_index]); 512 507 if (WARN_ON(!pool)) 513 508 return NULL; 514 509 ··· 522 515 523 516 /* Links stack into the freelist. */ 524 517 static void depot_free_stack(struct stack_record *stack) 518 + __must_not_hold(&pool_lock) 525 519 { 526 520 unsigned long flags; 527 521