/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
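
/*
 * A minimal usage sketch of the resulting spin_*() API; 'counter_lock'
 * and 'counter' are hypothetical names, not part of this header:
 *
 *      static DEFINE_SPINLOCK(counter_lock);
 *      static unsigned long counter;
 *
 *      void counter_inc(void)
 *      {
 *              spin_lock(&counter_lock);
 *              counter++;
 *              spin_unlock(&counter_lock);
 *      }
 */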

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * These must be defined before including other files; inline functions
 * need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                            CPU1
 *
 *        WRITE_ONCE(X, 1);               WRITE_ONCE(Y, 1);
 *        spin_lock(S);                   smp_mb();
 *        smp_mb__after_spinlock();       r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                CPU1                            CPU2
 *
 *        spin_lock(S);       spin_lock(S);                   r1 = READ_ONCE(Y);
 *        WRITE_ONCE(X, 1);   smp_mb__after_spinlock();       smp_rmb();
 *        spin_unlock(S);     r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                            WRITE_ONCE(Y, 1);
 *                            spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        kcsan_mb()
#endif
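
/*
 * A usage sketch in the style of property (1); 'wait_lock', 'cond' and
 * do_wakeup() are hypothetical. The barrier gives the lock acquisition
 * full-barrier semantics against the later load of 'cond':
 *
 *      spin_lock(&wait_lock);
 *      smp_mb__after_spinlock();
 *      if (READ_ONCE(cond))
 *              do_wakeup();
 *      spin_unlock(&wait_lock);
 */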

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
        __cond_acquires(true, lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops where they are not required.
 */
#define raw_spin_trylock(lock)          _raw_spin_trylock(lock)

#define raw_spin_lock(lock)             _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument, so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif
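
/*
 * Usage sketch: 'flags' must be a plain unsigned long variable passed
 * by name, since the macro assigns to it; typecheck() turns anything
 * else into a build error. ('lock' here is a hypothetical raw_spinlock_t.)
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&lock, flags);
 *      ...
 *      raw_spin_unlock_irqrestore(&lock, flags);
 */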

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock)       _raw_spin_trylock_bh(lock)

#define raw_spin_trylock_irq(lock)      _raw_spin_trylock_irq(lock)

#define raw_spin_trylock_irqsave(lock, flags)   _raw_spin_trylock_irqsave(lock, &(flags))

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                          \
do {                                                    \
        spinlock_check(_lock);                          \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock);         \
} while (0)

#endif
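
/*
 * Initialization sketch: statically allocated locks can use the
 * DEFINE_SPINLOCK() initializer, while dynamically allocated ones (a
 * hypothetical 'struct foo' here) must call spin_lock_init() at runtime
 * so that lockdep gets a valid lock class:
 *
 *      static DEFINE_SPINLOCK(static_lock);
 *
 *      struct foo {
 *              spinlock_t lock;
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);
 *      }
 */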

static __always_inline void spin_lock(spinlock_t *lock)
        __acquires(lock) __no_context_analysis
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
        __acquires(lock) __no_context_analysis
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
        __cond_acquires(true, lock) __no_context_analysis
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
        __release(spinlock_check(lock)); __acquire(lock);       \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
        __release(spinlock_check(lock)); __acquire(lock);               \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
        __acquires(lock) __no_context_analysis
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
        __release(spinlock_check(lock)); __acquire(lock);       \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
        __release(spinlock_check(lock)); __acquire(lock);               \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
        __releases(lock) __no_context_analysis
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
        __releases(lock) __no_context_analysis
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
        __releases(lock) __no_context_analysis
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
        __releases(lock) __no_context_analysis
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
        __cond_acquires(true, lock) __no_context_analysis
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
        __cond_acquires(true, lock) __no_context_analysis
{
        return raw_spin_trylock_irq(&lock->rlock);
}

static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
        __cond_acquires(true, lock) __no_context_analysis
{
        return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
}
#define spin_trylock_irqsave(lock, flags)       _spin_trylock_irqsave(lock, &(flags))

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)
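
/*
 * Usage sketch: assert_spin_locked() both documents and (where the
 * underlying implementation can tell) checks a locking precondition in
 * functions that must only be called with the lock held. 'struct foo'
 * is again hypothetical:
 *
 *      static void foo_update_locked(struct foo *f)
 *      {
 *              assert_spin_locked(&f->lock);
 *              ...
 *      }
 */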

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Does a critical section need to be broken because another task is
 * waiting? (This technically does not depend on CONFIG_PREEMPTION, but
 * reflects a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
        if (!preempt_model_preemptible())
                return 0;

        return spin_is_contended(lock);
}
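
/*
 * Usage sketch of the usual lock-break pattern; more_work() and
 * process_one() are hypothetical:
 *
 *      spin_lock(&lock);
 *      while (more_work()) {
 *              process_one();
 *              if (spin_needbreak(&lock)) {
 *                      spin_unlock(&lock);
 *                      cond_resched();
 *                      spin_lock(&lock);
 *              }
 *      }
 *      spin_unlock(&lock);
 */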

/*
 * Check whether an rwlock is contended.
 * Returns non-zero if there is another task waiting on the rwlock.
 * Returns zero if the lock is not contended or the system / underlying
 * rwlock implementation does not support contention detection.
 * Technically this does not depend on CONFIG_PREEMPTION, but reflects a
 * general need for low latency.
 */
static inline int rwlock_needbreak(rwlock_t *lock)
{
        if (!preempt_model_preemptible())
                return 0;

        return rwlock_is_contended(lock);
}

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
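
/*
 * Usage sketch: the classic caller is a "final put" path, where dropping
 * the last reference must also take the lock protecting the lookup
 * structure. 'struct foo', 'foo_list_lock' and the list linkage are
 * hypothetical:
 *
 *      void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
 *                      list_del(&f->node);
 *                      spin_unlock(&foo_list_lock);
 *                      kfree(f);
 *              }
 *      }
 */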

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags) __cond_acquires(true, lock);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))

extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
                                            unsigned long *flags) __cond_acquires(true, lock);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);
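
/*
 * Usage sketch: one lock per group of hash buckets, scaled by CPU count
 * via cpu_mult. 'table_locks', 'lock_mask' and 'hash' are hypothetical:
 *
 *      static spinlock_t *table_locks;
 *      static unsigned int lock_mask;
 *
 *      int table_locks_init(void)
 *      {
 *              return alloc_bucket_spinlocks(&table_locks, &lock_mask,
 *                                            1024, 1, GFP_KERNEL);
 *      }
 *
 * The lock for a given bucket is then &table_locks[hash & lock_mask].
 */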

DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
                    raw_spin_lock(_T->lock),
                    raw_spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_try_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
                    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
                    raw_spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_nested_constructor(_T)       WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
                    raw_spin_lock_irq(_T->lock),
                    raw_spin_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irq_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irq_try_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
                    raw_spin_lock_bh(_T->lock),
                    raw_spin_unlock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_bh_constructor(_T)   WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_bh_try_constructor(_T)       WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
                    raw_spin_lock_irqsave(_T->lock, _T->flags),
                    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irqsave_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
                         raw_spin_trylock_irqsave(_T->lock, _T->flags))
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irqsave_try_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)

DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
                    spin_lock(_T->lock),
                    spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_try_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
                    spin_lock_irq(_T->lock),
                    spin_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irq_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
                         spin_trylock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irq_try_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
                    spin_lock_bh(_T->lock),
                    spin_unlock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_bh_constructor(_T)       WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
                         spin_trylock_bh(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_bh_try_constructor(_T)   WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
                    spin_lock_irqsave(_T->lock, _T->flags),
                    spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irqsave_constructor(_T)  WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
                         spin_trylock_irqsave(_T->lock, _T->flags))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irqsave_try_constructor(_T)      WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)

DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_init_constructor(_T)     WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)

DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
                    read_lock(_T->lock),
                    read_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
                    read_lock_irq(_T->lock),
                    read_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_irq_constructor(_T)     WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
                    read_lock_irqsave(_T->lock, _T->flags),
                    read_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
                    write_lock(_T->lock),
                    write_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_constructor(_T)        WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
                    write_lock_irq(_T->lock),
                    write_unlock_irq(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_irq_constructor(_T)    WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
                    write_lock_irqsave(_T->lock, _T->flags),
                    write_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_irqsave_constructor(_T)        WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)

DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_rwlock_init_constructor(_T)       WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
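
/*
 * Usage sketch for the lock guards (see <linux/cleanup.h>): the lock is
 * released automatically when the guard goes out of scope. 'struct
 * stats' is hypothetical:
 *
 *      void stats_bump(struct stats *s)
 *      {
 *              guard(spinlock)(&s->lock);
 *              s->count++;
 *      }
 *
 *      void stats_bump_irqsave(struct stats *s)
 *      {
 *              scoped_guard(spinlock_irqsave, &s->lock)
 *                      s->count++;
 *      }
 */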

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */