Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking/mutex: Add context analysis

Add compiler context analysis annotations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260121111213.745353747@infradead.org
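
For context: __guarded_by(), __acquires(), __releases(), __cond_acquires() and __must_hold() drive Clang's compile-time context (capability) analysis, which warns when guarded data is touched, or a lock contract is broken, without the declared lock held. A minimal, hypothetical sketch of the idea (none of these names are from the patch, and it assumes mutex_lock()/mutex_unlock() carry the acquire/release annotations added by this series):

#include <linux/mutex.h>
#include <linux/types.h>

struct frob_stats {
	struct mutex lock;
	u64 count __guarded_by(&lock);		/* only valid with lock held */
};

/* Callers must already hold s->lock; the analysis checks every call site. */
static void frob_stats_inc(struct frob_stats *s)
	__must_hold(&s->lock)
{
	s->count++;
}

static void frob_stats_poke(struct frob_stats *s)
{
	mutex_lock(&s->lock);		/* acquires s->lock for the analysis */
	frob_stats_inc(s);
	mutex_unlock(&s->lock);		/* releases s->lock */
	/* s->count++ here would be flagged: the lock is no longer held. */
}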

+45 -7
+1 -1
include/linux/mutex.h
···
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
-extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __acquires(lock);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass) __cond_acquires(0, lock);
 extern int __must_check _mutex_lock_killable(struct mutex *lock,
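
The __cond_acquires(0, lock) annotation above expresses a conditional acquire: the mutex is held on return only when the function returns 0. A hypothetical caller sketch, assuming mutex_lock_interruptible() carries the same contract:

#include <linux/errno.h>
#include <linux/mutex.h>

static int frob_update(struct mutex *m, int *val)
{
	if (mutex_lock_interruptible(m))
		return -EINTR;		/* non-zero return: the lock was not taken */

	(*val)++;			/* zero return: the lock is held here */
	mutex_unlock(m);
	return 0;
}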
+1 -1
include/linux/mutex_types.h
···
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
-	struct mutex_waiter *first_waiter;
+	struct mutex_waiter *first_waiter __guarded_by(&wait_lock);
 #ifdef CONFIG_DEBUG_MUTEXES
	void *magic;
 #endif
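
Marking first_waiter __guarded_by(&wait_lock) means every access is expected to happen under wait_lock; deliberate lockless peeks have to be annotated explicitly, which is why the optimistic-spin check in mutex.c below is wrapped in data_race(). A hypothetical sketch of that pattern (frob_has_waiters() is not part of the patch):

#include <linux/compiler.h>		/* data_race() */
#include <linux/mutex.h>
#include <linux/types.h>

/* Best-effort unlocked peek at a __guarded_by() field. */
static bool frob_has_waiters(struct mutex *lock)
{
	return data_race(lock->first_waiter) != NULL;
}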
+2
kernel/locking/Makefile
···
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT := n
 
+CONTEXT_ANALYSIS_mutex.o := y
+
 obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
 # Avoid recursion lockdep -> sanitizer -> ... -> lockdep & improve performance.
+28 -5
kernel/locking/mutex.c
···
 static void __mutex_init_generic(struct mutex *lock)
 {
	atomic_long_set(&lock->owner, 0);
-	raw_spin_lock_init(&lock->wait_lock);
-	lock->first_waiter = NULL;
+	scoped_guard (raw_spinlock_init, &lock->wait_lock) {
+		lock->first_waiter = NULL;
+	}
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
 #endif
···
  * follow with a __mutex_trylock() before failing.
  */
 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
+	__cond_acquires(true, lock)
 {
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;
···
 }
 
 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
+	__cond_releases(true, lock)
 {
	unsigned long curr = (unsigned long)current;
 
···
 static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct mutex_waiter *first)
+	__must_hold(&lock->wait_lock)
 {
	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
	debug_mutex_add_waiter(lock, waiter, current);
···
 
 static void
 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+	__must_hold(&lock->wait_lock)
 {
	if (list_empty(&waiter->list)) {
		__mutex_clear_flag(lock, MUTEX_FLAGS);
···
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void __sched __mutex_lock_slowpath(struct mutex *lock);
+static void __sched __mutex_lock_slowpath(struct mutex *lock)
+	__acquires(lock);
 
 /**
  * mutex_lock - acquire the mutex
···
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
-	if (waiter && lock->first_waiter != waiter)
+	if (waiter && data_race(lock->first_waiter != waiter))
		return false;
 
	return true;
···
 }
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+	__releases(lock);
 
 /**
  * mutex_unlock - release the mutex
···
  * of a unlocked mutex is not allowed.
  */
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
+	__no_context_analysis
 {
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
···
 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+	__cond_acquires(0, lock)
 {
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
···
 static int __sched
 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
+	__cond_acquires(0, lock)
 {
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 }
···
 static int __sched
 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
+	__cond_acquires(0, lock)
 {
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
···
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+	__acquire(lock);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
···
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+	__acquire(lock);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
···
	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
+	__acquire(lock);
	io_schedule_finish(token);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
 
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+	__cond_releases(nonzero, lock)
 {
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;
···
  * Release the lock, slowpath:
  */
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+	__releases(lock)
 {
	struct task_struct *next = NULL;
	struct mutex_waiter *waiter;
···
	unsigned long flags;
 
	mutex_release(&lock->dep_map, ip);
+	__release(lock);
 
	/*
	 * Release the lock before (potentially) taking the spinlock such that
···
 
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
+	__acquires(lock)
 {
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+	__acquire(lock);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
+	__cond_acquires(0, lock)
 {
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
+	__cond_acquires(0, lock)
 {
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+	__cond_acquires(0, lock)
 {
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
···
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
+	__cond_acquires(0, lock)
 {
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
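
A recurring pattern in the hunks above: wrappers that promise __acquires(lock) obtain the mutex through __mutex_lock(), whose __cond_acquires(0, lock) contract the analysis cannot see always succeeds on the uninterruptible path, so a no-op __acquire(lock) marker follows the call to reconcile the two. A hypothetical sketch of the same shape (frob_lock() and frob_lock_common() are illustrative only):

#include <linux/mutex.h>

/* Illustrative helper: conditionally acquires, like __mutex_lock() above. */
static int frob_lock_common(struct mutex *lock) __cond_acquires(0, lock);

/* Uninterruptible wrapper: the helper cannot fail here, so assert the acquire. */
static void frob_lock(struct mutex *lock)
	__acquires(lock)
{
	frob_lock_common(lock);		/* always returns 0 in this mode */
	__acquire(lock);		/* no-op marker for the context analysis */
}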
+1
kernel/locking/mutex.h
···
  * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #ifndef CONFIG_PREEMPT_RT
+#include <linux/mutex.h>
 /*
  * This is the control structure for tasks blocked on mutex, which resides
  * on the blocked task's kernel stack:
+12
kernel/locking/ww_mutex.h
···
 
 static inline struct mutex_waiter *
 __ww_waiter_first(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
	return lock->first_waiter;
 }
 
 static inline struct mutex_waiter *
 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+	__must_hold(&lock->wait_lock)
 {
	w = list_next_entry(w, list);
	if (lock->first_waiter == w)
···
 
 static inline struct mutex_waiter *
 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+	__must_hold(&lock->wait_lock)
 {
	w = list_prev_entry(w, list);
	if (lock->first_waiter == w)
···
 
 static inline struct mutex_waiter *
 __ww_waiter_last(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
	struct mutex_waiter *w = lock->first_waiter;
 
···
 
 static inline void
 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+	__must_hold(&lock->wait_lock)
 {
	__mutex_add_waiter(lock, waiter, pos);
 }
···
 }
 
 static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
+	__acquires(&lock->wait_lock)
 {
	raw_spin_lock_irqsave(&lock->wait_lock, *flags);
 }
 
 static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
+	__releases(&lock->wait_lock)
 {
	raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
 }
 
 static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
	lockdep_assert_held(&lock->wait_lock);
 }
···
			    struct ww_acquire_ctx *ww_ctx,
			    struct ww_acquire_ctx *hold_ctx,
			    struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
	struct task_struct *owner = __ww_mutex_owner(lock);
 
···
 static void
 __ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
			 struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
	struct MUTEX_WAITER *cur;
 
···
 static inline int
 __ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
+	__must_hold(&lock->wait_lock)
 {
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
···
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx,
		      struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;
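
These __must_hold(&lock->wait_lock) annotations make the waiter iterators callable only with wait_lock held. A hypothetical caller sketch showing the required bracketing (frob_mutex_has_waiters() is not from the patch, and it ignores that these helpers are private to kernel/locking):

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static bool frob_mutex_has_waiters(struct mutex *lock)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	ret = __ww_waiter_first(lock) != NULL;	/* allowed: wait_lock is held */
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}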