Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_LOCAL_LOCK_H
3# error "Do not include directly, include linux/local_lock.h"
4#endif
5
6#include <linux/percpu-defs.h>
7#include <linux/irqflags.h>
8#include <linux/lockdep.h>
9#include <linux/debug_locks.h>
10#include <asm/current.h>
11
12#ifndef CONFIG_PREEMPT_RT
13
/*
 * local_lock_t - protects per-CPU data on non-RT kernels.
 *
 * On !PREEMPT_RT the lock carries no runtime state of its own: exclusion
 * comes from disabling preemption/interrupts in the lock operations below.
 * With lockdep enabled it additionally tracks the lock class and the
 * current owner for debug checks.
 */
context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep class/state for this lock */
	struct task_struct *owner;	/* task holding the lock (debug only) */
#endif
};
typedef struct local_lock local_lock_t;
21
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* must mirror local_lock_t layout: */
	struct task_struct *owner;	/* this type is cast to local_lock_t * below */
#endif
	/*
	 * 0/1 flag set while the lock is held on this CPU; read with
	 * READ_ONCE() by trylock attempts so a nested attempt (e.g. from
	 * IRQ/NMI context) fails instead of deadlocking.
	 */
	u8 acquired;
};
typedef struct local_trylock local_trylock_t;
31
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Static initializer fragment for the lockdep members: registers the lock
 * as a per-CPU lock whose inner wait type depends on the preemption config.
 */
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

/* local_trylock_t shares the debug member layout with local_lock_t. */
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)		\
	LOCAL_LOCK_DEBUG_INIT(lockname)

/*
 * Debug bookkeeping on acquire: tell lockdep first, then record ownership.
 * Local locks are not recursive, so an existing owner is a bug.
 */
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

/* Same as local_lock_acquire(), but flagged to lockdep as a trylock. */
static inline void local_trylock_acquire(local_lock_t *l)
{
	lock_map_acquire_try(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

/* Debug bookkeeping on release: only the recorded owner may unlock. */
static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

/*
 * Runtime init of the debug-only owner field; the dep_map is set up
 * separately by __local_lock_init().
 */
static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
/* Without lockdep, all debug hooks compile away to nothing. */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
77
/* Static initializers; empty braces plus the optional lockdep fragment. */
#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
#define INIT_LOCAL_TRYLOCK(lockname)	{ LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
80
/*
 * Runtime initializer: registers a lockdep class keyed to this call site
 * (one static key per macro expansion) and clears the debug owner.  All of
 * it compiles away when lockdep is disabled.
 */
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

/*
 * local_trylock_t begins with the same members as local_lock_t, so the
 * shared part is initialized via the plain initializer.  Note this does
 * not write 'acquired' — presumably callers rely on zeroed storage
 * (static/percpu); NOTE(review): confirm against callers.
 */
#define __local_trylock_init(lock)			\
do {							\
	__local_lock_init((local_lock_t *)lock);	\
} while (0)
96
/*
 * Like __local_lock_init(), but registers the lock as a normal (not
 * per-CPU) lockdep class; used for the softirq-nested locking helpers.
 */
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)
107
/*
 * Common acquire path for both lock types; _Generic() dispatches on the
 * pointer type at compile time.  For a local_trylock_t the 'acquired' flag
 * is additionally set — via WRITE_ONCE(), since it is read locklessly by
 * concurrent trylock attempts — so a nested local_trylock() on this CPU
 * fails instead of deadlocking.  The cast to local_lock_t * is valid
 * because both structs share their leading (debug) members.
 */
#define __local_lock_acquire(lock)					\
	do {								\
		local_trylock_t *__tl;					\
		local_lock_t *__l;					\
									\
		__l = (local_lock_t *)(lock);				\
		__tl = (local_trylock_t *)__l;				\
		_Generic((lock),					\
			local_trylock_t *: ({				\
				lockdep_assert(__tl->acquired == 0);	\
				WRITE_ONCE(__tl->acquired, 1);		\
			}),						\
			local_lock_t *: (void)0);			\
		local_lock_acquire(__l);				\
	} while (0)
123
/*
 * Lock by disabling preemption: keeps the task on this CPU so the per-CPU
 * data stays exclusive.  __acquire() feeds sparse/context analysis only.
 */
#define __local_lock(lock)		\
	do {				\
		preempt_disable();	\
		__local_lock_acquire(lock);	\
		__acquire(lock);	\
	} while (0)

/* Lock variant that also excludes hard interrupt handlers on this CPU. */
#define __local_lock_irq(lock)			\
	do {					\
		local_irq_disable();		\
		__local_lock_acquire(lock);	\
		__acquire(lock);		\
	} while (0)

/* As __local_lock_irq(), but saves the previous interrupt state in @flags. */
#define __local_lock_irqsave(lock, flags)	\
	do {					\
		local_irq_save(flags);		\
		__local_lock_acquire(lock);	\
		__acquire(lock);		\
	} while (0)
144
/*
 * Try to acquire a local_trylock_t.  Evaluates to 1 on success (preemption
 * left disabled), 0 if the lock is already held on this CPU — e.g. when
 * the attempt comes from an interrupt that fired inside a locked section.
 * On failure preemption is re-enabled and no state is changed.
 */
#define __local_trylock(lock)					\
	__try_acquire_ctx_lock(lock, ({				\
		local_trylock_t *__tl;				\
								\
		preempt_disable();				\
		__tl = (lock);					\
		if (READ_ONCE(__tl->acquired)) {		\
			preempt_enable();			\
			__tl = NULL;				\
		} else {					\
			WRITE_ONCE(__tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)__tl);		\
		}						\
		!!__tl;						\
	}))

/*
 * As __local_trylock(), but disables interrupts instead of preemption;
 * @flags receives the saved interrupt state and is restored on failure.
 */
#define __local_trylock_irqsave(lock, flags)			\
	__try_acquire_ctx_lock(lock, ({				\
		local_trylock_t *__tl;				\
								\
		local_irq_save(flags);				\
		__tl = (lock);					\
		if (READ_ONCE(__tl->acquired)) {		\
			local_irq_restore(flags);		\
			__tl = NULL;				\
		} else {					\
			WRITE_ONCE(__tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)__tl);		\
		}						\
		!!__tl;						\
	}))
178
/*
 * preemption or migration must be disabled before calling __local_lock_is_locked,
 * otherwise this_cpu_ptr() could resolve against a different CPU by the
 * time 'acquired' is read.  Only meaningful for local_trylock_t.
 */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
181
/*
 * Common release path, mirroring __local_lock_acquire(): lockdep/owner
 * bookkeeping is dropped first, then (for local_trylock_t only) the
 * 'acquired' flag is cleared with WRITE_ONCE() — it must still read 1
 * here, since the lock is held until this point.
 */
#define __local_lock_release(lock)					\
	do {								\
		local_trylock_t *__tl;					\
		local_lock_t *__l;					\
									\
		__l = (local_lock_t *)(lock);				\
		__tl = (local_trylock_t *)__l;				\
		local_lock_release(__l);				\
		_Generic((lock),					\
			local_trylock_t *: ({				\
				lockdep_assert(__tl->acquired == 1);	\
				WRITE_ONCE(__tl->acquired, 0);		\
			}),						\
			local_lock_t *: (void)0);			\
	} while (0)
197
/* Unlock counterpart of __local_lock(): release, then re-enable preemption. */
#define __local_unlock(lock)			\
	do {					\
		__release(lock);		\
		__local_lock_release(lock);	\
		preempt_enable();		\
	} while (0)

/*
 * Unlock counterpart of __local_lock_irq().  Unconditionally re-enables
 * interrupts, so it must only pair with the _irq (not _irqsave) variant.
 */
#define __local_unlock_irq(lock)		\
	do {					\
		__release(lock);		\
		__local_lock_release(lock);	\
		local_irq_enable();		\
	} while (0)

/* Unlock counterpart of __local_lock_irqsave(): restores saved @flags. */
#define __local_unlock_irqrestore(lock, flags)	\
	do {					\
		__release(lock);		\
		__local_lock_release(lock);	\
		local_irq_restore(flags);	\
	} while (0)

/*
 * Lockdep-only acquire for locks nested inside softirq processing; no
 * preemption change here — NOTE(review): exclusion presumably comes from
 * already being in BH context, which the assert enforces.
 */
#define __local_lock_nested_bh(lock)		\
	do {					\
		lockdep_assert_in_softirq();	\
		local_lock_acquire((lock));	\
		__acquire(lock);		\
	} while (0)

/* Release counterpart of __local_lock_nested_bh(). */
#define __local_unlock_nested_bh(lock)		\
	do {					\
		__release(lock);		\
		local_lock_release((lock));	\
	} while (0)
231
232#else /* !CONFIG_PREEMPT_RT */
233
234#include <linux/sched.h>
235#include <linux/spinlock.h>
236
/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;
/* No distinct trylock type on RT: the spinlock supports trylock natively. */
typedef spinlock_t local_trylock_t;

/* Static initializers: an unlocked per-CPU ("local") spinlock. */
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))

/* Runtime initializer; both lock types are plain spinlocks on RT. */
#define __local_lock_init(__l)			\
	do {					\
		local_spin_lock_init((__l));	\
	} while (0)

#define __local_trylock_init(__l)		__local_lock_init(__l)
253
/*
 * Pin the task to this CPU (but stay preemptible), then take the per-CPU
 * spinlock — which on RT is a sleeping lock.
 */
#define __local_lock(__lock)		\
	do {				\
		migrate_disable();	\
		spin_lock((__lock));	\
	} while (0)

/* RT does not disable interrupts for local locks; _irq is a plain lock. */
#define __local_lock_irq(lock)			__local_lock(lock)

/*
 * As above; interrupts stay enabled, so there is no real flag state.
 * @flags is zeroed only to keep the caller's variable initialized.
 */
#define __local_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = 0;				\
		__local_lock(lock);			\
	} while (0)
268
/* Unlock counterpart of __local_lock(): drop the lock, allow migration again. */
#define __local_unlock(__lock)			\
	do {					\
		spin_unlock((__lock));		\
		migrate_enable();		\
	} while (0)

/* Interrupts were never disabled on RT, so the irq variants just unlock. */
#define __local_unlock_irq(lock)		__local_unlock(lock)

/* @flags was set to 0 at lock time and carries no state to restore. */
#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

/*
 * Nested-BH variant: no migrate_disable() here — NOTE(review): presumably
 * the enclosing softirq/BH section already prevents migration, which the
 * assert below checks.
 */
#define __local_lock_nested_bh(lock)	\
do {					\
	lockdep_assert_in_softirq_func();	\
	spin_lock((lock));		\
} while (0)

/* Release counterpart of __local_lock_nested_bh(). */
#define __local_unlock_nested_bh(lock)	\
do {					\
	spin_unlock((lock));		\
} while (0)
289
/*
 * Trylock on RT: the underlying spinlock is a sleeping lock, so any
 * attempt from NMI or hard-IRQ context must fail outright.  (Bitwise OR
 * of in_nmi()/in_hardirq() is intentional — both are used as booleans.)
 * Evaluates to 1 with migration disabled on success, 0 on failure with
 * all state restored.
 */
#define __local_trylock(lock)					\
	__try_acquire_ctx_lock(lock, context_unsafe(({		\
		int __locked;					\
								\
		if (in_nmi() | in_hardirq()) {			\
			__locked = 0;				\
		} else {					\
			migrate_disable();			\
			__locked = spin_trylock((lock));	\
			if (!__locked)				\
				migrate_enable();		\
		}						\
		__locked;					\
	})))

/* Interrupts are untouched on RT; @flags is zeroed for caller convenience. */
#define __local_trylock_irqsave(lock, flags)		\
	__try_acquire_ctx_lock(lock, ({			\
		typecheck(unsigned long, flags);	\
		flags = 0;				\
		__local_trylock(lock);			\
	}))
311
/*
 * migration must be disabled before calling __local_lock_is_locked,
 * so that this_cpu_ptr() stays valid.  "Locked" on RT means the current
 * task owns the rt_mutex backing this CPU's spinlock.
 */
#define __local_lock_is_locked(__lock)				\
	(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
315
316#endif /* CONFIG_PREEMPT_RT */
317
#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__)
/*
 * Because the compiler only knows about the base per-CPU variable, use this
 * helper function to make the compiler think we lock/unlock the @base variable,
 * and hide the fact we actually pass the per-CPU instance to lock/unlock
 * functions.
 */
static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
	__returns_ctx_lock(base) __attribute__((overloadable))
{
	return this_cpu_ptr(base);
}
#ifndef CONFIG_PREEMPT_RT
/*
 * Overload for local_trylock_t.  Not needed on RT, where both lock types
 * are spinlock_t and the overload above already matches.
 */
static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
	__returns_ctx_lock(base) __attribute__((overloadable))
{
	return this_cpu_ptr(base);
}
#endif /* CONFIG_PREEMPT_RT */
#else /* WARN_CONTEXT_ANALYSIS */
/* Without context analysis no compiler trickery is needed; plain deref. */
#define __this_cpu_local_lock(base) this_cpu_ptr(base)
#endif /* WARN_CONTEXT_ANALYSIS */