Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

locking/mutex: Remove the list_head from struct mutex

Instead of embedding a list_head in struct mutex, store a pointer to
the first waiter. The list of waiters remains a doubly linked list, so
we can still efficiently add to the tail of the list and remove from
the front (or middle) of the list.

Some of the list manipulation becomes more complicated, but it's a
reasonable tradeoff on the slow paths to shrink data structures which
embed a mutex like struct file.

Some of the debug checks have to be deleted because there's no equivalent
to checking them in the new scheme (e.g. an empty waiter->list now means
that it is the only waiter, not that the waiter is no longer on the list).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260305195545.3707590-4-willy@infradead.org

authored by

Matthew Wilcox (Oracle) and committed by
Peter Zijlstra
25500ba7 b9bdd4b6

+37 -46
+1 -1
include/linux/mutex.h
··· 79 79 #define __MUTEX_INITIALIZER(lockname) \ 80 80 { .owner = ATOMIC_LONG_INIT(0) \ 81 81 , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ 82 - , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ 82 + , .first_waiter = NULL \ 83 83 __DEBUG_MUTEX_INITIALIZER(lockname) \ 84 84 __DEP_MAP_MUTEX_INITIALIZER(lockname) } 85 85
+1 -1
include/linux/mutex_types.h
··· 44 44 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 45 45 struct optimistic_spin_queue osq; /* Spinner MCS lock */ 46 46 #endif 47 - struct list_head wait_list; 47 + struct mutex_waiter *first_waiter; 48 48 #ifdef CONFIG_DEBUG_MUTEXES 49 49 void *magic; 50 50 #endif
+1 -4
kernel/locking/mutex-debug.c
··· 37 37 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) 38 38 { 39 39 lockdep_assert_held(&lock->wait_lock); 40 - DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); 40 + DEBUG_LOCKS_WARN_ON(!lock->first_waiter); 41 41 DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); 42 - DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); 43 42 } 44 43 45 44 void debug_mutex_free_waiter(struct mutex_waiter *waiter) ··· 61 62 { 62 63 struct mutex *blocked_on = __get_task_blocked_on(task); 63 64 64 - DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); 65 65 DEBUG_LOCKS_WARN_ON(waiter->task != task); 66 66 DEBUG_LOCKS_WARN_ON(blocked_on && blocked_on != lock); 67 67 ··· 72 74 { 73 75 if (likely(debug_locks)) { 74 76 DEBUG_LOCKS_WARN_ON(lock->magic != lock); 75 - DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 76 77 } 77 78 } 78 79
+27 -22
kernel/locking/mutex.c
··· 47 47 { 48 48 atomic_long_set(&lock->owner, 0); 49 49 raw_spin_lock_init(&lock->wait_lock); 50 - INIT_LIST_HEAD(&lock->wait_list); 50 + lock->first_waiter = NULL; 51 51 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 52 52 osq_lock_init(&lock->osq); 53 53 #endif ··· 194 194 atomic_long_andnot(flag, &lock->owner); 195 195 } 196 196 197 - static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) 198 - { 199 - return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; 200 - } 201 - 202 197 /* 203 198 * Add @waiter to a given location in the lock wait_list and set the 204 199 * FLAG_WAITERS flag if it's the first waiter. 205 200 */ 206 201 static void 207 202 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 208 - struct list_head *list) 203 + struct mutex_waiter *first) 209 204 { 210 205 hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX); 211 206 debug_mutex_add_waiter(lock, waiter, current); 212 207 213 - list_add_tail(&waiter->list, list); 214 - if (__mutex_waiter_is_first(lock, waiter)) 208 + if (!first) 209 + first = lock->first_waiter; 210 + 211 + if (first) { 212 + list_add_tail(&waiter->list, &first->list); 213 + } else { 214 + INIT_LIST_HEAD(&waiter->list); 215 + lock->first_waiter = waiter; 215 216 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); 217 + } 216 218 } 217 219 218 220 static void 219 221 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) 220 222 { 221 - list_del(&waiter->list); 222 - if (likely(list_empty(&lock->wait_list))) 223 + if (list_empty(&waiter->list)) { 223 224 __mutex_clear_flag(lock, MUTEX_FLAGS); 225 + lock->first_waiter = NULL; 226 + } else { 227 + if (lock->first_waiter == waiter) { 228 + lock->first_waiter = list_first_entry(&waiter->list, 229 + struct mutex_waiter, list); 230 + } 231 + list_del(&waiter->list); 232 + } 224 233 225 234 debug_mutex_remove_waiter(lock, waiter, current); 226 235 hung_task_clear_blocker(); ··· 349 340 * Similarly, stop spinning 
if we are no longer the 350 341 * first waiter. 351 342 */ 352 - if (waiter && !__mutex_waiter_is_first(lock, waiter)) 343 + if (waiter && lock->first_waiter != waiter) 353 344 return false; 354 345 355 346 return true; ··· 654 645 655 646 if (!use_ww_ctx) { 656 647 /* add waiting tasks to the end of the waitqueue (FIFO): */ 657 - __mutex_add_waiter(lock, &waiter, &lock->wait_list); 648 + __mutex_add_waiter(lock, &waiter, NULL); 658 649 } else { 659 650 /* 660 651 * Add in stamp order, waking up waiters that must kill ··· 700 691 701 692 schedule_preempt_disabled(); 702 693 703 - first = __mutex_waiter_is_first(lock, &waiter); 694 + first = lock->first_waiter == &waiter; 704 695 705 696 /* 706 697 * As we likely have been woken up by task ··· 743 734 * Wound-Wait; we stole the lock (!first_waiter), check the 744 735 * waiters as anyone might want to wound us. 745 736 */ 746 - if (!ww_ctx->is_wait_die && 747 - !__mutex_waiter_is_first(lock, &waiter)) 737 + if (!ww_ctx->is_wait_die && lock->first_waiter != &waiter) 748 738 __ww_mutex_check_waiters(lock, ww_ctx, &wake_q); 749 739 } 750 740 ··· 939 931 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) 940 932 { 941 933 struct task_struct *next = NULL; 934 + struct mutex_waiter *waiter; 942 935 DEFINE_WAKE_Q(wake_q); 943 936 unsigned long owner; 944 937 unsigned long flags; ··· 971 962 972 963 raw_spin_lock_irqsave(&lock->wait_lock, flags); 973 964 debug_mutex_unlock(lock); 974 - if (!list_empty(&lock->wait_list)) { 975 - /* get the first entry from the wait-list: */ 976 - struct mutex_waiter *waiter = 977 - list_first_entry(&lock->wait_list, 978 - struct mutex_waiter, list); 979 - 965 + waiter = lock->first_waiter; 966 + if (waiter) { 980 967 next = waiter->task; 981 968 982 969 debug_mutex_wake_waiter(lock, waiter);
+7 -18
kernel/locking/ww_mutex.h
··· 8 8 static inline struct mutex_waiter * 9 9 __ww_waiter_first(struct mutex *lock) 10 10 { 11 - struct mutex_waiter *w; 12 - 13 - w = list_first_entry(&lock->wait_list, struct mutex_waiter, list); 14 - if (list_entry_is_head(w, &lock->wait_list, list)) 15 - return NULL; 16 - 17 - return w; 11 + return lock->first_waiter; 18 12 } 19 13 20 14 static inline struct mutex_waiter * 21 15 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) 22 16 { 23 17 w = list_next_entry(w, list); 24 - if (list_entry_is_head(w, &lock->wait_list, list)) 18 + if (lock->first_waiter == w) 25 19 return NULL; 26 20 27 21 return w; ··· 25 31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) 26 32 { 27 33 w = list_prev_entry(w, list); 28 - if (list_entry_is_head(w, &lock->wait_list, list)) 34 + if (lock->first_waiter == w) 29 35 return NULL; 30 36 31 37 return w; ··· 34 40 static inline struct mutex_waiter * 35 41 __ww_waiter_last(struct mutex *lock) 36 42 { 37 - struct mutex_waiter *w; 43 + struct mutex_waiter *w = lock->first_waiter; 38 44 39 - w = list_last_entry(&lock->wait_list, struct mutex_waiter, list); 40 - if (list_entry_is_head(w, &lock->wait_list, list)) 41 - return NULL; 42 - 45 + if (w) 46 + w = list_prev_entry(w, list); 43 47 return w; 44 48 } 45 49 46 50 static inline void 47 51 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos) 48 52 { 49 - struct list_head *p = &lock->wait_list; 50 - if (pos) 51 - p = &pos->list; 52 - __mutex_add_waiter(lock, waiter, p); 53 + __mutex_add_waiter(lock, waiter, pos); 53 54 } 54 55 55 56 static inline struct task_struct *