Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2#ifndef __LINUX_SPINLOCK_RT_H
3#define __LINUX_SPINLOCK_RT_H
4
5#ifndef __LINUX_INSIDE_SPINLOCK_H
6#error Do not include directly. Use spinlock.h
7#endif
8
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Register @lock's lock class with lockdep. @name and @key identify the
 * class; @percpu presumably selects per-CPU lock-class handling — confirm
 * against the out-of-line implementation.
 */
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu);
#else
/* Without lock debugging there is no lockdep state to initialize. */
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu)
{
}
#endif
18
/*
 * Initialize an RT spinlock: set up the embedded rtmutex, then register
 * the lock with lockdep (a no-op when CONFIG_DEBUG_LOCK_ALLOC=n).
 */
#define __spin_lock_init(slock, name, key, percpu)		\
do {								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, name, key, percpu);		\
} while (0)
24
/*
 * Per-invocation-site init: the static __key gives every spin_lock_init()
 * call site its own lock class, and #slock supplies the class name.
 */
#define _spin_lock_init(slock, percpu)				\
do {								\
	static struct lock_class_key __key;			\
	__spin_lock_init(slock, #slock, &__key, percpu);	\
} while (0)
30
/* Public initializers; the local_ variant marks the lock as per-CPU. */
#define spin_lock_init(slock)		_spin_lock_init(slock, false)
#define local_spin_lock_init(slock)	_spin_lock_init(slock, true)
33
/*
 * Out-of-line RT spinlock primitives. The trylock variants conditionally
 * acquire the lock and return true on success (see __cond_acquires).
 */
extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
41
/*
 * spin_lock() - acquire @lock. Delegates to rt_spin_lock(); the lock is
 * backed by an rtmutex (see __spin_lock_init), not a spinning lock.
 */
static __always_inline void spin_lock(spinlock_t *lock)
	__acquires(lock)
{
	rt_spin_lock(lock);
}
47
#ifdef CONFIG_LOCKDEP
/*
 * Nested-acquisition variants: forward the subclass / nest_lock
 * information to lockdep through the rt_spin_lock_*() helpers.
 */
# define __spin_lock_nested(lock, subclass)				\
	rt_spin_lock_nested(lock, subclass)

# define __spin_lock_nest_lock(lock, nest_lock)				\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
/* Interrupts are not disabled here, so @flags is just forced to 0. */
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__spin_lock_nested(lock, subclass);		\
	} while (0)

#else
 /*
  * Always evaluate the 'subclass' argument to avoid that the compiler
  * warns about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif
75
/* Public nested-lock API, mapped onto the CONFIG_LOCKDEP-dependent helpers. */
#define spin_lock_nested(lock, subclass)		\
	__spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)		\
	__spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	__spin_lock_irqsave_nested(lock, flags, subclass)
84
/*
 * spin_lock_bh() - disable bottom halves, then acquire @lock.
 * BHs stay disabled for the whole lock-held region (re-enabled in
 * spin_unlock_bh()).
 */
static __always_inline void spin_lock_bh(spinlock_t *lock)
	__acquires(lock)
{
	/* Investigate: Drop bh when blocking ? */
	local_bh_disable();
	rt_spin_lock(lock);
}
92
/*
 * spin_lock_irq() - acquire @lock. Note: interrupts are NOT disabled;
 * the _irq name is kept only for API compatibility with the non-RT
 * spinlock interface.
 */
static __always_inline void spin_lock_irq(spinlock_t *lock)
	__acquires(lock)
{
	rt_spin_lock(lock);
}
98
/*
 * spin_lock_irqsave() - acquire @lock. Interrupts are not disabled, so
 * there is no state to save; @flags is type-checked and forced to 0 to
 * keep the generic calling convention intact.
 */
#define spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = 0;				\
		spin_lock(lock);			\
	} while (0)
105
/* spin_unlock() - release @lock via the rtmutex-based unlock path. */
static __always_inline void spin_unlock(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
111
/*
 * spin_unlock_bh() - release @lock, then re-enable the bottom halves
 * that spin_lock_bh() disabled. Order matters: the lock is dropped
 * before BHs are re-enabled.
 */
static __always_inline void spin_unlock_bh(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
	local_bh_enable();
}
118
/*
 * spin_unlock_irq() - release @lock. Interrupt state is untouched,
 * mirroring spin_lock_irq() which never disabled interrupts.
 */
static __always_inline void spin_unlock_irq(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
124
/*
 * spin_unlock_irqrestore() - release @lock. @flags is deliberately
 * ignored: spin_lock_irqsave() saved nothing (it sets flags to 0), so
 * there is nothing to restore.
 */
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
						   unsigned long flags)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
131
/*
 * Trylock variants; each returns non-zero on success. The _irq form is
 * a plain trylock since interrupts are never disabled here.
 */
#define spin_trylock(lock)		rt_spin_trylock(lock)

#define spin_trylock_bh(lock)		rt_spin_trylock_bh(lock)

#define spin_trylock_irq(lock)		rt_spin_trylock(lock)
137
/*
 * _spin_trylock_irqsave() - trylock with the irqsave calling convention.
 * Interrupts are not touched, so *@flags is simply zeroed to satisfy the
 * generic API contract. Returns true if the lock was acquired.
 */
static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
	__cond_acquires(true, lock)
{
	*flags = 0;
	return rt_spin_trylock(lock);
}
#define spin_trylock_irqsave(lock, flags)	_spin_trylock_irqsave(lock, &(flags))
145
/*
 * Contention is not tracked for RT spinlocks; always report 0 while
 * still evaluating @lock for type/side-effect correctness.
 */
#define spin_is_contended(lock)		(((void)(lock), 0))

/* Check whether the underlying rtmutex is currently locked. */
static inline int spin_is_locked(spinlock_t *lock)
{
	return rt_mutex_base_is_locked(&lock->lock);
}

#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
154
155#include <linux/rwlock_rt.h>
156
157#endif