/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *  (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *  (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
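
/*
 * Example (a minimal usage sketch; 'my_dev' is a hypothetical structure,
 * not part of this API):
 *
 *	struct my_dev {
 *		raw_spinlock_t	lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		raw_spin_lock_init(&dev->lock);
 *	}
 *
 * With CONFIG_DEBUG_SPINLOCK the macro also registers a static
 * lock_class_key, so lockdep can distinguish this lock class.
 */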

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
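
/*
 * Example (a minimal sketch modelled on the wakeup pattern above; the
 * lock and the 'cond' flag are hypothetical):
 *
 *	spin_lock(&wq_lock);
 *	smp_mb__after_spinlock();
 *	if (READ_ONCE(cond))
 *		...
 *	spin_unlock(&wq_lock);
 *
 * The barrier orders program-order earlier stores against the later load
 * of 'cond'; plain lock acquisition (ACQUIRE ordering) alone does not.
 */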

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops where they are not required.
 */
#define raw_spin_trylock(lock)	_raw_spin_trylock(lock)

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
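
/*
 * Example (a minimal usage sketch; 'my_lock' is a hypothetical lock):
 * 'flags' must be a plain unsigned long variable (the typecheck() above
 * enforces this); it is written by the lock operation and consumed by the
 * matching unlock:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */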

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock)	_raw_spin_trylock_bh(lock)

#define raw_spin_trylock_irq(lock)	_raw_spin_trylock_irq(lock)

#define raw_spin_trylock_irqsave(lock, flags)	_raw_spin_trylock_irqsave(lock, &(flags))

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non-PREEMPT_RT kernel: map to raw spinlocks. */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif

static __always_inline void spin_lock(spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
	__cond_acquires(true, lock) __no_context_analysis
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
	__release(spinlock_check(lock)); __acquire(lock);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
	__release(spinlock_check(lock)); __acquire(lock);		\
} while (0)
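
/*
 * Example (a minimal sketch; 'a' and 'b' are hypothetical instances of
 * the same structure, so their locks share one lockdep class): the nested
 * annotation tells lockdep that taking two locks of the same class here
 * is intentional and ordered:
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&b->lock);
 *	spin_unlock(&a->lock);
 */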

static __always_inline void spin_lock_irq(spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
	__release(spinlock_check(lock)); __acquire(lock);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
	__release(spinlock_check(lock)); __acquire(lock);		\
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
	__releases(lock) __no_context_analysis
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
	__releases(lock) __no_context_analysis
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
	__releases(lock) __no_context_analysis
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
	__releases(lock) __no_context_analysis
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
	__cond_acquires(true, lock) __no_context_analysis
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
	__cond_acquires(true, lock) __no_context_analysis
{
	return raw_spin_trylock_irq(&lock->rlock);
}

static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
	__cond_acquires(true, lock) __no_context_analysis
{
	return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
}
#define spin_trylock_irqsave(lock, flags)	_spin_trylock_irqsave(lock, &(flags))
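
/*
 * Example (a minimal usage sketch; 'my_lock' is hypothetical): the
 * trylock variants return nonzero on success and leave the lock untaken
 * on failure, so the unlock must be conditional:
 *
 *	unsigned long flags;
 *
 *	if (spin_trylock_irqsave(&my_lock, flags)) {
 *		...
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */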

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Does a critical section need to be broken because another task is
 * waiting? (This technically does not depend on CONFIG_PREEMPTION, but
 * reflects a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
	if (!preempt_model_preemptible())
		return 0;

	return spin_is_contended(lock);
}
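
/*
 * Example (a minimal sketch; the lock, the list and the process() helper
 * are hypothetical): a long-running loop can voluntarily drop the lock
 * when another task is spinning on it:
 *
 *	spin_lock(&my_lock);
 *	while (!list_empty(&my_list)) {
 *		item = list_first_entry(&my_list, struct my_item, node);
 *		list_del(&item->node);
 *		process(item);
 *		if (spin_needbreak(&my_lock)) {
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 *	spin_unlock(&my_lock);
 */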

/*
 * Check if a rwlock is contended.
 * Returns non-zero if there is another task waiting on the rwlock.
 * Returns zero if the lock is not contended or the system / underlying
 * rwlock implementation does not support contention detection.
 * (This technically does not depend on CONFIG_PREEMPTION, but reflects a
 * general need for low latency.)
 */
static inline int rwlock_needbreak(rwlock_t *lock)
{
	if (!preempt_model_preemptible())
		return 0;

	return rwlock_is_contended(lock);
}

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false in all other cases.
 */
extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
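
/*
 * Example (a minimal sketch; 'obj', its refcount and free_obj() are
 * hypothetical): the classic pattern takes the list lock only when the
 * last reference is dropped:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		free_obj(obj);
 *	}
 */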

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags) __cond_acquires(true, lock);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		_atomic_dec_and_lock_irqsave(atomic, lock, &(flags))

extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
					    unsigned long *flags) __cond_acquires(true, lock);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
		_atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
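
/*
 * Example (a minimal sketch; the hash-table names are hypothetical):
 * allocate an array of bucket locks, pick one by masking the bucket hash,
 * and free the array on teardown:
 *
 *	spinlock_t *bucket_locks;
 *	unsigned int lock_mask;
 *	int ret;
 *
 *	ret = alloc_bucket_spinlocks(&bucket_locks, &lock_mask, 1024, 0,
 *				     GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	spin_lock(&bucket_locks[hash & lock_mask]);
 *	...
 *	spin_unlock(&bucket_locks[hash & lock_mask]);
 *	...
 *	free_bucket_spinlocks(bucket_locks);
 */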

DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
		    raw_spin_lock(_T->lock),
		    raw_spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
		    raw_spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_nested_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
		    raw_spin_lock_irq(_T->lock),
		    raw_spin_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irq_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irq_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
		    raw_spin_lock_bh(_T->lock),
		    raw_spin_unlock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_bh_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_bh_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
		    raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irqsave_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
			 raw_spin_trylock_irqsave(_T->lock, _T->flags))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irqsave_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_init_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
		    spin_lock(_T->lock),
		    spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)
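
/*
 * Example (a minimal sketch; 'my_lock' and my_func() are hypothetical):
 * with the guard classes above, <linux/cleanup.h> releases the lock
 * automatically when the scope is left:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_func(void)
 *	{
 *		guard(spinlock)(&my_lock);
 *		... the lock is held until my_func() returns ...
 *	}
 *
 * or, with an explicit scope:
 *
 *	scoped_guard(spinlock_irqsave, &my_lock) {
 *		...
 *	}
 */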

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
		    spin_lock_irq(_T->lock),
		    spin_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irq_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
			 spin_trylock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irq_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
		    spin_lock_bh(_T->lock),
		    spin_unlock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_bh_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
			 spin_trylock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_bh_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
		    spin_lock_irqsave(_T->lock, _T->flags),
		    spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irqsave_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
			 spin_trylock_irqsave(_T->lock, _T->flags))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irqsave_try_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_init_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)

DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
		    read_lock(_T->lock),
		    read_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
		    read_lock_irq(_T->lock),
		    read_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_irq_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
		    read_lock_irqsave(_T->lock, _T->flags),
		    read_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_irqsave_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
		    write_lock(_T->lock),
		    write_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
		    write_lock_irq(_T->lock),
		    write_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_irq_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
		    write_lock_irqsave(_T->lock, _T->flags),
		    write_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_irqsave_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)

DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_rwlock_init_constructor(_T)	WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */