Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

locking/mutex: Support Clang's context analysis

Add support for Clang's context analysis for mutex.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-11-elver@google.com

Authored by Marco Elver and committed by Peter Zijlstra.
370f0a34 38f1311a

+90 -18
+1 -1
Documentation/dev-tools/context-analysis.rst
··· 79 79 ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 80 80 81 81 Currently the following synchronization primitives are supported: 82 - `raw_spinlock_t`, `spinlock_t`, `rwlock_t`. 82 + `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`. 83 83 84 84 For context locks with an initialization function (e.g., `spin_lock_init()`), 85 85 calling this function before initializing any guarded members or globals
+23 -15
include/linux/mutex.h
··· 62 62 static struct lock_class_key __key; \ 63 63 \ 64 64 __mutex_init((mutex), #mutex, &__key); \ 65 + __assume_ctx_lock(mutex); \ 65 66 } while (0) 66 67 67 68 /** ··· 183 182 * Also see Documentation/locking/mutex-design.rst. 184 183 */ 185 184 #ifdef CONFIG_DEBUG_LOCK_ALLOC 186 - extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 185 + extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock); 187 186 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); 188 187 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, 189 - unsigned int subclass); 188 + unsigned int subclass) __cond_acquires(0, lock); 190 189 extern int __must_check _mutex_lock_killable(struct mutex *lock, 191 - unsigned int subclass, struct lockdep_map *nest_lock); 192 - extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); 190 + unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock); 191 + extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock); 193 192 194 193 #define mutex_lock(lock) mutex_lock_nested(lock, 0) 195 194 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) ··· 212 211 _mutex_lock_killable(lock, subclass, NULL) 213 212 214 213 #else 215 - extern void mutex_lock(struct mutex *lock); 216 - extern int __must_check mutex_lock_interruptible(struct mutex *lock); 217 - extern int __must_check mutex_lock_killable(struct mutex *lock); 218 - extern void mutex_lock_io(struct mutex *lock); 214 + extern void mutex_lock(struct mutex *lock) __acquires(lock); 215 + extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock); 216 + extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock); 217 + extern void mutex_lock_io(struct mutex *lock) __acquires(lock); 219 218 220 219 # define mutex_lock_nested(lock, subclass) mutex_lock(lock) 221 220 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) ··· 233 232 */ 234 233 235 234 #ifdef CONFIG_DEBUG_LOCK_ALLOC 236 - extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); 235 + extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock); 237 236 238 237 #define mutex_trylock_nest_lock(lock, nest_lock) \ 239 238 ( \ ··· 243 242 244 243 #define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL) 245 244 #else 246 - extern int mutex_trylock(struct mutex *lock); 245 + extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock); 247 246 #define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock) 248 247 #endif 249 248 250 - extern void mutex_unlock(struct mutex *lock); 249 + extern void mutex_unlock(struct mutex *lock) __releases(lock); 251 250 252 - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 251 + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock); 253 252 254 - DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T)) 255 - DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T)) 256 - DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0) 253 + DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock)) 254 + DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock)) 255 + DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0) 256 + 257 + DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T)) 258 + #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T) 259 + DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T)) 260 + #define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T) 261 + DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T)) 262 + #define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T) 257 263 258 264 extern unsigned long mutex_get_owner(struct mutex *lock); 259 265
+2 -2
include/linux/mutex_types.h
··· 38 38 * - detects multi-task circular deadlocks and prints out all affected 39 39 * locks and tasks (and only those tasks) 40 40 */ 41 - struct mutex { 41 + context_lock_struct(mutex) { 42 42 atomic_long_t owner; 43 43 raw_spinlock_t wait_lock; 44 44 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER ··· 59 59 */ 60 60 #include <linux/rtmutex.h> 61 61 62 - struct mutex { 62 + context_lock_struct(mutex) { 63 63 struct rt_mutex_base rtmutex; 64 64 #ifdef CONFIG_DEBUG_LOCK_ALLOC 65 65 struct lockdep_map dep_map;
+64
lib/test_context-analysis.c
··· 5 5 */ 6 6 7 7 #include <linux/build_bug.h> 8 + #include <linux/mutex.h> 8 9 #include <linux/spinlock.h> 9 10 10 11 /* ··· 145 144 read_unlock, 146 145 read_trylock, 147 146 TEST_OP_RO); 147 + 148 + struct test_mutex_data { 149 + struct mutex mtx; 150 + int counter __guarded_by(&mtx); 151 + }; 152 + 153 + static void __used test_mutex_init(struct test_mutex_data *d) 154 + { 155 + mutex_init(&d->mtx); 156 + d->counter = 0; 157 + } 158 + 159 + static void __used test_mutex_lock(struct test_mutex_data *d) 160 + { 161 + mutex_lock(&d->mtx); 162 + d->counter++; 163 + mutex_unlock(&d->mtx); 164 + mutex_lock_io(&d->mtx); 165 + d->counter++; 166 + mutex_unlock(&d->mtx); 167 + } 168 + 169 + static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a) 170 + { 171 + if (!mutex_lock_interruptible(&d->mtx)) { 172 + d->counter++; 173 + mutex_unlock(&d->mtx); 174 + } 175 + if (!mutex_lock_killable(&d->mtx)) { 176 + d->counter++; 177 + mutex_unlock(&d->mtx); 178 + } 179 + if (mutex_trylock(&d->mtx)) { 180 + d->counter++; 181 + mutex_unlock(&d->mtx); 182 + } 183 + if (atomic_dec_and_mutex_lock(a, &d->mtx)) { 184 + d->counter++; 185 + mutex_unlock(&d->mtx); 186 + } 187 + } 188 + 189 + static void __used test_mutex_assert(struct test_mutex_data *d) 190 + { 191 + lockdep_assert_held(&d->mtx); 192 + d->counter++; 193 + } 194 + 195 + static void __used test_mutex_guard(struct test_mutex_data *d) 196 + { 197 + guard(mutex)(&d->mtx); 198 + d->counter++; 199 + } 200 + 201 + static void __used test_mutex_cond_guard(struct test_mutex_data *d) 202 + { 203 + scoped_cond_guard(mutex_try, return, &d->mtx) { 204 + d->counter++; 205 + } 206 + scoped_cond_guard(mutex_intr, return, &d->mtx) { 207 + d->counter++; 208 + } 209 + }