Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/spinlock_api_smp.h at 4c2ed2a3dbda5cad4d7b2f5f394c91522abbaa92 (216 lines, 6.2 kB)
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "Please do not include this file directly."
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)		__cond_acquires(true, lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)	__cond_acquires(true, lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	local_irq_disable();
	if (_raw_spin_trylock(lock))
		return true;
	local_irq_enable();
	return false;
}

static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
	__cond_acquires(true, lock)
{
	local_irq_save(*flags);
	if (_raw_spin_trylock(lock))
		return true;
	local_irq_restore(*flags);
	return false;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
	__acquires(lock) __no_context_analysis
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
	__releases(lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
	__releases(lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
	__releases(lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
	__releases(lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}

/* PREEMPT_RT has its own rwlock implementation */
#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
#endif

#endif /* __LINUX_SPINLOCK_API_SMP_H */
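
For context, the _raw_spin_* declarations above are not called directly by driver code; on SMP builds the public spin_lock()/spin_lock_irqsave() wrappers from <linux/spinlock.h> bottom out in them. The following is a minimal usage sketch, not part of the header: the lock name, the counter, and the two helper functions are hypothetical, while DEFINE_SPINLOCK, spin_lock_irqsave(), spin_unlock_irqrestore(), spin_lock() and spin_unlock() are the standard kernel API.

/*
 * Illustrative sketch only. stats_lock, stats_events and the two
 * helpers below are made-up names for demonstration.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);	/* hypothetical lock guarding stats_events */
static unsigned long stats_events;	/* shared data protected by stats_lock */

/* Safe from any context: local IRQs are disabled while the lock is held. */
static void stats_record_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&stats_lock, flags);	/* resolves to _raw_spin_lock_irqsave() on SMP */
	stats_events++;
	spin_unlock_irqrestore(&stats_lock, flags);
}

/* Process context only: plain lock/unlock maps to _raw_spin_lock()/_raw_spin_unlock(). */
static unsigned long stats_read_events(void)
{
	unsigned long val;

	spin_lock(&stats_lock);
	val = stats_events;
	spin_unlock(&stats_lock);
	return val;
}

The _irqsave variant is the one to reach for when the same lock can also be taken from interrupt context, which is exactly the situation the __raw_spin_lock_irqsave() implementation above handles by saving and restoring the IRQ flags around the critical section.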