Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

compiler-context-analysis: Introduce scoped init guards

Add scoped init guard definitions for common synchronization primitives
supported by context analysis.

The scoped init guards treat the context as active within the initialization
scope of the underlying context lock, given that initialization implies
exclusive access to the underlying object. This allows initialization of
guarded members without disabling context analysis, while clearly
distinguishing initialization from subsequent usage.

The documentation is updated with the new recommendation. Where scoped
init guards are not provided or cannot be implemented (ww_mutex omitted
for lack of multi-arg guard initializers), the alternative is to just
disable context analysis where guarded members are initialized.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20251212095943.GM3911114@noisy.programming.kicks-ass.net/
Link: https://patch.msgid.link/20260119094029.1344361-3-elver@google.com

authored by

Marco Elver and committed by
Peter Zijlstra
d084a737 3b9ed303

+70 -18
+27 -3
Documentation/dev-tools/context-analysis.rst
··· 83 83 `bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`, 84 84 `ww_mutex`. 85 85 86 - For context locks with an initialization function (e.g., `spin_lock_init()`), 87 - calling this function before initializing any guarded members or globals 88 - prevents the compiler from issuing warnings about unguarded initialization. 86 + To initialize variables guarded by a context lock with an initialization 87 + function (``type_init(&lock)``), prefer using ``guard(type_init)(&lock)`` or 88 + ``scoped_guard(type_init, &lock) { ... }`` to initialize such guarded members 89 + or globals in the enclosing scope. This initializes the context lock and treats 90 + the context as active within the initialization scope (initialization implies 91 + exclusive access to the underlying object). 92 + 93 + For example:: 94 + 95 + struct my_data { 96 + spinlock_t lock; 97 + int counter __guarded_by(&lock); 98 + }; 99 + 100 + void init_my_data(struct my_data *d) 101 + { 102 + ... 103 + guard(spinlock_init)(&d->lock); 104 + d->counter = 0; 105 + ... 106 + } 107 + 108 + Alternatively, initializing guarded variables can be done with context analysis 109 + disabled, preferably in the smallest possible scope (due to lack of any other 110 + checking): either with a ``context_unsafe(var = init)`` expression, or by 111 + marking small initialization functions with the ``__context_unsafe(init)`` 112 + attribute. 89 113 90 114 Lockdep assertions, such as `lockdep_assert_held()`, inform the compiler's 91 115 context analysis that the associated synchronization primitive is held after
+2 -7
include/linux/compiler-context-analysis.h
··· 32 32 /* 33 33 * The "assert_capability" attribute is a bit confusingly named. It does not 34 34 * generate a check. Instead, it tells the analysis to *assume* the capability 35 - * is held. This is used for: 36 - * 37 - * 1. Augmenting runtime assertions, that can then help with patterns beyond the 38 - * compiler's static reasoning abilities. 39 - * 40 - * 2. Initialization of context locks, so we can access guarded variables right 41 - * after initialization (nothing else should access the same object yet). 35 + * is held. This is used for augmenting runtime assertions, that can then help 36 + * with patterns beyond the compiler's static reasoning abilities. 42 37 */ 43 38 # define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__))) 44 39 # define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
+8
include/linux/local_lock.h
··· 104 104 local_lock_nested_bh(_T->lock), 105 105 local_unlock_nested_bh(_T->lock)) 106 106 107 + DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */) 108 + 107 109 DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T)) 108 110 #define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T) 109 111 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T)) ··· 114 112 #define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T) 115 113 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T)) 116 114 #define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T) 115 + DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T)) 116 + #define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T) 117 + 118 + DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */) 119 + DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T)) 120 + #define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T) 117 121 118 122 #endif
+1
include/linux/local_lock_internal.h
··· 6 6 #include <linux/percpu-defs.h> 7 7 #include <linux/irqflags.h> 8 8 #include <linux/lockdep.h> 9 + #include <linux/debug_locks.h> 9 10 #include <asm/current.h> 10 11 11 12 #ifndef CONFIG_PREEMPT_RT
+3
include/linux/mutex.h
··· 254 254 DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock)) 255 255 DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock)) 256 256 DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0) 257 + DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */) 257 258 258 259 DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T)) 259 260 #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T) ··· 262 261 #define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T) 263 262 DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T)) 264 263 #define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T) 264 + DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T)) 265 + #define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T) 265 266 266 267 extern unsigned long mutex_get_owner(struct mutex *lock); 267 268
+4
include/linux/rwsem.h
··· 280 280 DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T)) 281 281 #define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T) 282 282 283 + DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */) 284 + DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T)) 285 + #define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T) 286 + 283 287 /* 284 288 * downgrade write lock to read lock 285 289 */
+5
include/linux/seqlock.h
··· 14 14 */ 15 15 16 16 #include <linux/compiler.h> 17 + #include <linux/cleanup.h> 17 18 #include <linux/kcsan-checks.h> 18 19 #include <linux/lockdep.h> 19 20 #include <linux/mutex.h> ··· 1358 1357 */ 1359 1358 #define scoped_seqlock_read(_seqlock, _target) \ 1360 1359 __scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock)) 1360 + 1361 + DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */) 1362 + DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T)) 1363 + #define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T) 1361 1364 1362 1365 #endif /* __LINUX_SEQLOCK_H */
+12
include/linux/spinlock.h
··· 582 582 DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 583 583 #define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T) 584 584 585 + DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */) 586 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 587 + #define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T) 588 + 585 589 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t, 586 590 spin_lock(_T->lock), 587 591 spin_unlock(_T->lock)) ··· 630 626 DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T)) 631 627 #define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T) 632 628 629 + DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */) 630 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T)) 631 + #define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T) 632 + 633 633 DEFINE_LOCK_GUARD_1(read_lock, rwlock_t, 634 634 read_lock(_T->lock), 635 635 read_unlock(_T->lock)) ··· 671 663 unsigned long flags) 672 664 DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T)) 673 665 #define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T) 666 + 667 + DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */) 668 + DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T)) 669 + #define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T) 674 670 675 671 #undef __LINUX_INSIDE_SPINLOCK_H 676 672 #endif /* __LINUX_SPINLOCK_H */
+8 -8
lib/test_context-analysis.c
··· 35 35 }; \ 36 36 static void __used test_##class##_init(struct test_##class##_data *d) \ 37 37 { \ 38 - type_init(&d->lock); \ 38 + guard(type_init)(&d->lock); \ 39 39 d->counter = 0; \ 40 40 } \ 41 41 static void __used test_##class(struct test_##class##_data *d) \ ··· 83 83 84 84 TEST_SPINLOCK_COMMON(raw_spinlock, 85 85 raw_spinlock_t, 86 - raw_spin_lock_init, 86 + raw_spinlock_init, 87 87 raw_spin_lock, 88 88 raw_spin_unlock, 89 89 raw_spin_trylock, ··· 109 109 110 110 TEST_SPINLOCK_COMMON(spinlock, 111 111 spinlock_t, 112 - spin_lock_init, 112 + spinlock_init, 113 113 spin_lock, 114 114 spin_unlock, 115 115 spin_trylock, ··· 163 163 164 164 static void __used test_mutex_init(struct test_mutex_data *d) 165 165 { 166 - mutex_init(&d->mtx); 166 + guard(mutex_init)(&d->mtx); 167 167 d->counter = 0; 168 168 } 169 169 ··· 226 226 227 227 static void __used test_seqlock_init(struct test_seqlock_data *d) 228 228 { 229 - seqlock_init(&d->sl); 229 + guard(seqlock_init)(&d->sl); 230 230 d->counter = 0; 231 231 } 232 232 ··· 275 275 276 276 static void __used test_rwsem_init(struct test_rwsem_data *d) 277 277 { 278 - init_rwsem(&d->sem); 278 + guard(rwsem_init)(&d->sem); 279 279 d->counter = 0; 280 280 } 281 281 ··· 475 475 476 476 static void __used test_local_lock_init(struct test_local_lock_data *d) 477 477 { 478 - local_lock_init(&d->lock); 478 + guard(local_lock_init)(&d->lock); 479 479 d->counter = 0; 480 480 } 481 481 ··· 519 519 520 520 static void __used test_local_trylock_init(struct test_local_trylock_data *d) 521 521 { 522 - local_trylock_init(&d->lock); 522 + guard(local_trylock_init)(&d->lock); 523 523 d->counter = 0; 524 524 } 525 525