Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:
"Two fixes from the locking/urgent pile:

 - Fix lockdep's detection of "USED" <- "IN-NMI" inversions (Peter
   Zijlstra)

 - Make percpu-rwsem operations on the semaphore's ->read_count
   IRQ-safe because it can be used in an IRQ context (Hou Tao)"

* tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/percpu-rwsem: Use this_cpu_{inc,dec}() for read_count
locking/lockdep: Fix "USED" <- "IN-NMI" inversions
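
For context on the percpu-rwsem side: __this_cpu_inc()/__this_cpu_dec() assume the caller already excludes all other updaters of the per-CPU variable, so on architectures without native per-CPU atomics their generic fallback is a plain read-modify-write that an interrupt can split. The this_cpu_*() variants wrap the same fallback in a local IRQ-disable. A rough sketch of the difference (simplified for illustration; the real macros in include/linux/percpu-defs.h go through several layers of indirection, and the *_sketch names below are hypothetical):

/* Sketch only, not the kernel's literal macros. */

/* __this_cpu_inc(): a non-atomic load/add/store. An interrupt that
 * increments or decrements the same per-CPU counter between the load
 * and the store makes one of the two updates disappear. */
#define __this_cpu_inc_sketch(pcp)                              \
do {                                                            \
        *raw_cpu_ptr(&(pcp)) += 1;                              \
} while (0)

/* this_cpu_inc(), generic fallback: the same read-modify-write, but
 * with local interrupts disabled across it, so it cannot be split.
 * On x86 both variants compile to a single %gs-prefixed add/inc, so
 * the conversion costs nothing there. */
#define this_cpu_inc_sketch(pcp)                                \
do {                                                            \
        unsigned long __flags;                                  \
        raw_local_irq_save(__flags);                            \
        *raw_cpu_ptr(&(pcp)) += 1;                              \
        raw_local_irq_restore(__flags);                         \
} while (0)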

4 files changed, 37 insertions(+), 12 deletions(-)

include/linux/percpu-rwsem.h (+4 -4)

···
         * anything we did within this RCU-sched read-side critical section.
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
-               __this_cpu_inc(*sem->read_count);
+               this_cpu_inc(*sem->read_count);
        else
                __percpu_down_read(sem, false); /* Unconditional memory barrier */
        /*
···
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
-               __this_cpu_inc(*sem->read_count);
+               this_cpu_inc(*sem->read_count);
        else
                ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
        preempt_enable();
···
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss))) {
-               __this_cpu_dec(*sem->read_count);
+               this_cpu_dec(*sem->read_count);
        } else {
                /*
                 * slowpath; reader will only ever wake a single blocked
···
                 * aggregate zero, as that is the only time it matters) they
                 * will also see our critical section.
                 */
-               __this_cpu_dec(*sem->read_count);
+               this_cpu_dec(*sem->read_count);
                rcuwait_wake_up(&sem->writer);
        }
        preempt_enable();

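These hunks are the fast paths of percpu_down_read(), percpu_down_read_trylock() and percpu_up_read(): preemption is disabled there, but interrupts are not. A hypothetical interleaving (assuming, per the pull message, that the semaphore is also released from an interrupt handler on the same CPU, and that the architecture has no native per-CPU atomics) shows the update the old __this_cpu_*() calls could lose:

/*
 * task context                           IRQ context
 * ------------                           -----------
 * percpu_down_read(sem)
 *   tmp = this CPU's read_count              .. tmp == n
 *       <interrupt>
 *                                        percpu_up_read(sem)
 *                                          read_count = n - 1
 *       </interrupt>
 *   read_count = tmp + 1                     .. n + 1, the IRQ's
 *                                               decrement is lost
 *
 * The per-CPU counts no longer sum to the number of readers actually
 * inside the critical section, so the writer's readers_active_check()
 * can block forever or complete too early.  this_cpu_inc()/dec() turn
 * each update into a single IRQ-safe operation and close the window.
 */
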
kernel/locking/lockdep.c (+29 -6)

···
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit new_bit)
 {
-       unsigned int new_mask = 1 << new_bit, ret = 1;
+       unsigned int old_mask, new_mask, ret = 1;

        if (new_bit >= LOCK_USAGE_STATES) {
                DEBUG_LOCKS_WARN_ON(1);
                return 0;
        }
+
+       if (new_bit == LOCK_USED && this->read)
+               new_bit = LOCK_USED_READ;
+
+       new_mask = 1 << new_bit;

        /*
         * If already set then do not dirty the cacheline,
···
        /*
         * Make sure we didn't race:
         */
-       if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
-               graph_unlock();
-               return 1;
-       }
+       if (unlikely(hlock_class(this)->usage_mask & new_mask))
+               goto unlock;

+       old_mask = hlock_class(this)->usage_mask;
        hlock_class(this)->usage_mask |= new_mask;
+
+       /*
+        * Save one usage_traces[] entry and map both LOCK_USED and
+        * LOCK_USED_READ onto the same entry.
+        */
+       if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+               if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+                       goto unlock;
+               new_bit = LOCK_USED;
+       }

        if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
                return 0;
···
                return 0;
        }

+unlock:
        graph_unlock();

        /*
···
 {
 #ifdef CONFIG_PROVE_LOCKING
        struct lock_class *class = look_up_lock_class(lock, subclass);
+       unsigned long mask = LOCKF_USED;

        /* if it doesn't have a class (yet), it certainly hasn't been used yet */
        if (!class)
                return;

-       if (!(class->usage_mask & LOCK_USED))
+       /*
+        * READ locks only conflict with USED, such that if we only ever use
+        * READ locks, there is no deadlock possible -- RCU.
+        */
+       if (!hlock->read)
+               mask |= LOCKF_USED_READ;
+
+       if (!(class->usage_mask & mask))
                return;

        hlock->class_idx = class - lock_classes;

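Two things happen in the lockdep.c hunks. mark_lock() now records a read-held acquisition as LOCK_USED_READ (mapping it onto the LOCK_USED slot in usage_traces[] so no extra stack trace is stored), and the last hunk, the check applied to acquisitions made from NMI context, now tests usage_mask against a real LOCKF_ bit mask rather than the LOCK_USED enum value, and only treats earlier usage as a conflict when a deadlock is actually possible. Schematically (an illustrative rearrangement, not the literal kernel code; the RCU remark comes from the in-diff comment):

/*
 * prior usage of the class      NMI acquire for read   NMI acquire for write
 * -------------------------     --------------------   ---------------------
 * LOCKF_USED      (write use)   warn                    warn
 * LOCKF_USED_READ (read only)   ok                      warn
 * never used                    ok                      ok
 */
unsigned long mask = LOCKF_USED;        /* prior write use always conflicts */

if (!hlock->read)                       /* a write acquisition in NMI also  */
        mask |= LOCKF_USED_READ;        /* conflicts with prior read use    */

if (class->usage_mask & mask) {
        /* possible "USED" <- "IN-NMI" inversion: report it */
}
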
kernel/locking/lockdep_internals.h (+2 -0)

···
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
        LOCK_USED,
+       LOCK_USED_READ,
        LOCK_USAGE_STATES
 };

···
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
        __LOCKF(USED)
+       __LOCKF(USED_READ)
 };

 #define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE |

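The LOCKF_USED_READ mask referenced from lockdep.c comes from the second hunk here: __LOCKF() turns each enum bit index into a bit-mask constant. Roughly (a sketch of the expansion, assuming the usual __LOCKF() definition in this header):

/* #define __LOCKF(__STATE)  LOCKF_##__STATE = (1 << LOCK_##__STATE), */
enum {
        /* ... per-state LOCKF_ bits generated from lockdep_states.h ... */
        LOCKF_USED      = 1 << LOCK_USED,       /* class was write-held */
        LOCKF_USED_READ = 1 << LOCK_USED_READ,  /* class was read-held  */
};
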
kernel/locking/percpu-rwsem.c (+2 -2)

···

 static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-       __this_cpu_inc(*sem->read_count);
+       this_cpu_inc(*sem->read_count);

        /*
         * Due to having preemption disabled the decrement happens on
···
        if (likely(!atomic_read_acquire(&sem->block)))
                return true;

-       __this_cpu_dec(*sem->read_count);
+       this_cpu_dec(*sem->read_count);

        /* Prod writer to re-evaluate readers_active_check() */
        rcuwait_wake_up(&sem->writer);