/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * a cacheline bouncing problem.
 */
context_lock_struct(rw_semaphore) {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct rwsem_waiter *first_waiter __guarded_by(&wait_lock);
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
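
/*
 * Illustrative sketch (not part of this header): per the layout advice
 * above, an embedded rwsem should not share a cacheline with other hot
 * fields of the containing structure. The struct and field names below
 * are hypothetical; ____cacheline_aligned_in_smp is the idiomatic way to
 * force the separation.
 *
 *	struct example_container {
 *		struct rw_semaphore sem;	// count/owner stay together
 *		unsigned long cold_flags;	// rarely touched, can stay
 *		atomic_t hot_refcount ____cacheline_aligned_in_smp;
 *	};
 */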

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .first_waiter = NULL,					\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
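
/*
 * Illustrative sketch (not part of this header): DECLARE_RWSEM() defines a
 * statically initialized rwsem, so no init_rwsem() call is needed before
 * use. The names below are hypothetical.
 *
 *	static DECLARE_RWSEM(example_config_sem);
 *
 *	static int example_get_config(const int *config_val)
 *	{
 *		int val;
 *
 *		down_read(&example_config_sem);
 *		val = *config_val;		// shared access
 *		up_read(&example_config_sem);
 *		return val;
 *	}
 */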

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)
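
/*
 * Illustrative sketch (not part of this header): an rwsem embedded in a
 * dynamically allocated object must be initialized with init_rwsem()
 * before first use; the static lock_class_key inside the macro gives each
 * init site its own lockdep class. The struct and names below are
 * hypothetical.
 *
 *	struct example_object {
 *		struct rw_semaphore lock;
 *		int state;
 *	};
 *
 *	static struct example_object *example_object_alloc(gfp_t gfp)
 *	{
 *		struct example_object *obj = kzalloc(sizeof(*obj), gfp);
 *
 *		if (obj)
 *			init_rwsem(&obj->lock);
 *		return obj;
 *	}
 */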

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic, meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type wants access to the
 * lock.
 */
static inline bool rwsem_is_contended(struct rw_semaphore *sem)
{
	return data_race(sem->first_waiter != NULL);
}
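
/*
 * Illustrative sketch (not part of this header): a typical use of
 * rwsem_is_contended() is to voluntarily drop and retake a long-held read
 * lock once waiters have queued up. The helper names below are
 * hypothetical.
 *
 *	static void example_long_scan(struct rw_semaphore *sem)
 *	{
 *		down_read(sem);
 *		while (example_more_work()) {		// hypothetical
 *			example_process_one_item();	// hypothetical
 *			if (rwsem_is_contended(sem)) {
 *				up_read(sem);
 *				cond_resched();
 *				down_read(sem);
 *			}
 *		}
 *		up_read(sem);
 *	}
 */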

#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
/*
 * Return just the real task structure pointer of the owner
 */
extern struct task_struct *rwsem_owner(struct rw_semaphore *sem);

/*
 * Return true if the rwsem is owned by a reader.
 */
extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#endif

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

context_lock_struct(rw_semaphore) {
	struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
	__assumes_ctx_lock(sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}
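
/*
 * Illustrative sketch (not part of this header): the assertion helpers
 * document and, with lockdep or the debug fallback, enforce that the
 * caller already holds the rwsem. The names below are hypothetical.
 *
 *	// Caller must hold obj->lock for writing.
 *	static void example_bump_state(struct example_object *obj)
 *	{
 *		rwsem_assert_held_write(&obj->lock);
 *		obj->state++;
 *	}
 */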

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem) __acquires_shared(sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem) __cond_acquires_shared(true, sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem) __acquires(sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem) __cond_acquires(0, sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem) __cond_acquires(true, sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem) __releases_shared(sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem) __releases(sem);
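
/*
 * Illustrative sketch (not part of this header): readers run concurrently
 * with each other but exclude writers; the killable and trylock variants
 * return instead of blocking unconditionally. The names below are
 * hypothetical.
 *
 *	static int example_update(struct rw_semaphore *sem, int *val, int n)
 *	{
 *		int ret = down_write_killable(sem);
 *
 *		if (ret)			// interrupted by fatal signal
 *			return ret;
 *		*val = n;			// exclusive access
 *		up_write(sem);
 *		return 0;
 *	}
 *
 *	static int example_peek(struct rw_semaphore *sem, const int *val)
 *	{
 *		int snapshot;
 *
 *		if (!down_read_trylock(sem))	// 0 means contention
 *			return -EBUSY;
 *		snapshot = *val;		// shared access
 *		up_read(sem);
 *		return snapshot;
 *	}
 */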

DEFINE_LOCK_GUARD_1(rwsem_read, struct rw_semaphore, down_read(_T->lock), up_read(_T->lock))
DEFINE_LOCK_GUARD_1_COND(rwsem_read, _try, down_read_trylock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(rwsem_read, _intr, down_read_interruptible(_T->lock), _RET == 0)

DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
#define class_rwsem_read_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read, _T)
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_try, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
#define class_rwsem_read_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_try, _T)
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_intr, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
#define class_rwsem_read_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_intr, _T)

DEFINE_LOCK_GUARD_1(rwsem_write, struct rw_semaphore, down_write(_T->lock), up_write(_T->lock))
DEFINE_LOCK_GUARD_1_COND(rwsem_write, _try, down_write_trylock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(rwsem_write, _kill, down_write_killable(_T->lock), _RET == 0)

DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
#define class_rwsem_write_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write, _T)
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
#define class_rwsem_write_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_try, _T)
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)

DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
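
/*
 * Illustrative sketch (not part of this header): the guard classes above
 * pair with guard()/scoped_guard() from <linux/cleanup.h>, releasing the
 * rwsem automatically on scope exit; the _try/_intr/_kill variants make
 * the guard conditional. The names below are hypothetical.
 *
 *	static int example_guarded_read(struct rw_semaphore *sem, const int *val)
 *	{
 *		guard(rwsem_read)(sem);		// up_read() on any return
 *		return *val;
 *	}
 *
 *	static void example_guarded_write(struct rw_semaphore *sem, int *val)
 *	{
 *		scoped_guard(rwsem_write, sem)	// up_write() at scope end
 *			*val = 0;
 *	}
 */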

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem) __releases(sem) __acquires_shared(sem);
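
/*
 * Illustrative sketch (not part of this header): downgrade_write()
 * atomically turns a held write lock into a read lock, so other readers
 * can proceed while the data stays protected throughout. The helper names
 * below are hypothetical.
 *
 *	static void example_publish_then_use(struct rw_semaphore *sem, int *val)
 *	{
 *		down_write(sem);
 *		*val = example_compute();	// hypothetical
 *		downgrade_write(sem);		// writer -> reader, no gap
 *		example_consume(*val);		// hypothetical
 *		up_read(sem);			// pairs with the read side
 *	}
 */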

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass) __acquires_shared(sem);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires_shared(0, sem);
extern void down_write_nested(struct rw_semaphore *sem, int subclass) __acquires(sem);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires(0, sem);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock) __acquires(sem);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
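
/*
 * Illustrative sketch (not part of this header): when two rwsems of the
 * same lock class are always taken in a fixed order (e.g. parent before
 * child), the _nested() variants tell lockdep which subclass each
 * acquisition belongs to. The names below are hypothetical.
 *
 *	enum { EXAMPLE_LOCK_PARENT, EXAMPLE_LOCK_CHILD };
 *
 *	static void example_lock_pair(struct rw_semaphore *parent,
 *				      struct rw_semaphore *child)
 *	{
 *		down_write_nested(parent, EXAMPLE_LOCK_PARENT);
 *		down_write_nested(child, EXAMPLE_LOCK_CHILD);
 *		// ... both locks held ...
 *		up_write(child);
 *		up_write(parent);
 *	}
 */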

/*
 * Take/release a lock when the task releasing it is not the one that
 * acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem) __acquires_shared(sem);
extern void up_read_non_owner(struct rw_semaphore *sem) __releases_shared(sem);
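
/*
 * Illustrative sketch (not part of this header): the non-owner variants
 * let a read lock taken in one task be released by another, e.g. when
 * handing work off to a workqueue. The names below are hypothetical.
 *
 *	static void example_submit(void)
 *	{
 *		down_read_non_owner(&example_sem);	// hypothetical rwsem
 *		queue_work(system_wq, &example_work);	// hypothetical work
 *	}
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		// ... use the data pinned by the submitter's read lock ...
 *		up_read_non_owner(&example_sem);
 *	}
 */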
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */