Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

locking/rwlock, spinlock: Support Clang's context analysis

Add support for Clang's context analysis for raw_spinlock_t,
spinlock_t, and rwlock_t. This wholesale conversion is required because
all three of them are interdependent.

To avoid warnings in constructors, the initialization functions mark a
lock as acquired when initialized before guarded variables.

The test verifies that common patterns do not generate false positives.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-9-elver@google.com

authored by

Marco Elver and committed by
Peter Zijlstra
f16a802d 7c451541

+347 -97
+2 -1
Documentation/dev-tools/context-analysis.rst
··· 78 78 Supported Kernel Primitives 79 79 ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 80 80 81 - .. Currently the following synchronization primitives are supported: 81 + Currently the following synchronization primitives are supported: 82 + `raw_spinlock_t`, `spinlock_t`, `rwlock_t`. 82 83 83 84 For context locks with an initialization function (e.g., `spin_lock_init()`), 84 85 calling this function before initializing any guarded members or globals
+13 -12
include/linux/rwlock.h
··· 22 22 static struct lock_class_key __key; \ 23 23 \ 24 24 __rwlock_init((lock), #lock, &__key); \ 25 + __assume_ctx_lock(lock); \ 25 26 } while (0) 26 27 #else 27 28 # define rwlock_init(lock) \ 28 - do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) 29 + do { *(lock) = __RW_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0) 29 30 #endif 30 31 31 32 #ifdef CONFIG_DEBUG_SPINLOCK 32 - extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock); 33 + extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock); 33 34 extern int do_raw_read_trylock(rwlock_t *lock); 34 - extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock); 35 + extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock); 35 36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock); 36 37 extern int do_raw_write_trylock(rwlock_t *lock); 37 38 extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); 38 39 #else 39 - # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) 40 + # define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) 40 41 # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) 41 - # define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) 42 + # define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0) 42 43 # define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0) 43 44 # define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) 44 45 # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) ··· 50 49 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various 51 50 * methods are defined as nops in the case they are not required. 
52 51 */ 53 - #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) 52 + #define read_trylock(lock) __cond_lock_shared(lock, _raw_read_trylock(lock)) 54 53 #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) 55 54 56 55 #define write_lock(lock) _raw_write_lock(lock) ··· 113 112 } while (0) 114 113 #define write_unlock_bh(lock) _raw_write_unlock_bh(lock) 115 114 116 - #define write_trylock_irqsave(lock, flags) \ 117 - ({ \ 118 - local_irq_save(flags); \ 119 - write_trylock(lock) ? \ 120 - 1 : ({ local_irq_restore(flags); 0; }); \ 121 - }) 115 + #define write_trylock_irqsave(lock, flags) \ 116 + __cond_lock(lock, ({ \ 117 + local_irq_save(flags); \ 118 + _raw_write_trylock(lock) ? \ 119 + 1 : ({ local_irq_restore(flags); 0; }); \ 120 + })) 122 121 123 122 #ifdef arch_rwlock_is_contended 124 123 #define rwlock_is_contended(lock) \
+23 -6
include/linux/rwlock_api_smp.h
··· 15 15 * Released under the General Public License (GPL). 16 16 */ 17 17 18 - void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); 18 + void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires_shared(lock); 19 19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); 20 20 void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock); 21 - void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); 21 + void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires_shared(lock); 22 22 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); 23 - void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); 23 + void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock); 24 24 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); 25 25 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) 26 26 __acquires(lock); ··· 28 28 __acquires(lock); 29 29 int __lockfunc _raw_read_trylock(rwlock_t *lock); 30 30 int __lockfunc _raw_write_trylock(rwlock_t *lock); 31 - void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); 31 + void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases_shared(lock); 32 32 void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); 33 - void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); 33 + void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases_shared(lock); 34 34 void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); 35 - void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); 35 + void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock); 36 36 void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); 37 37 void __lockfunc 38 38 _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) ··· 145 145 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 146 146 147 147 static inline void 
__raw_read_lock(rwlock_t *lock) 148 + __acquires_shared(lock) __no_context_analysis 148 149 { 149 150 preempt_disable(); 150 151 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); ··· 153 152 } 154 153 155 154 static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) 155 + __acquires_shared(lock) __no_context_analysis 156 156 { 157 157 unsigned long flags; 158 158 ··· 165 163 } 166 164 167 165 static inline void __raw_read_lock_irq(rwlock_t *lock) 166 + __acquires_shared(lock) __no_context_analysis 168 167 { 169 168 local_irq_disable(); 170 169 preempt_disable(); ··· 174 171 } 175 172 176 173 static inline void __raw_read_lock_bh(rwlock_t *lock) 174 + __acquires_shared(lock) __no_context_analysis 177 175 { 178 176 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 179 177 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); ··· 182 178 } 183 179 184 180 static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) 181 + __acquires(lock) __no_context_analysis 185 182 { 186 183 unsigned long flags; 187 184 ··· 194 189 } 195 190 196 191 static inline void __raw_write_lock_irq(rwlock_t *lock) 192 + __acquires(lock) __no_context_analysis 197 193 { 198 194 local_irq_disable(); 199 195 preempt_disable(); ··· 203 197 } 204 198 205 199 static inline void __raw_write_lock_bh(rwlock_t *lock) 200 + __acquires(lock) __no_context_analysis 206 201 { 207 202 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 208 203 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); ··· 211 204 } 212 205 213 206 static inline void __raw_write_lock(rwlock_t *lock) 207 + __acquires(lock) __no_context_analysis 214 208 { 215 209 preempt_disable(); 216 210 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); ··· 219 211 } 220 212 221 213 static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass) 214 + __acquires(lock) __no_context_analysis 222 215 { 223 216 preempt_disable(); 224 217 rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ··· 229 220 #endif /* 
!CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ 230 221 231 222 static inline void __raw_write_unlock(rwlock_t *lock) 223 + __releases(lock) 232 224 { 233 225 rwlock_release(&lock->dep_map, _RET_IP_); 234 226 do_raw_write_unlock(lock); ··· 237 227 } 238 228 239 229 static inline void __raw_read_unlock(rwlock_t *lock) 230 + __releases_shared(lock) 240 231 { 241 232 rwlock_release(&lock->dep_map, _RET_IP_); 242 233 do_raw_read_unlock(lock); ··· 246 235 247 236 static inline void 248 237 __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 238 + __releases_shared(lock) 249 239 { 250 240 rwlock_release(&lock->dep_map, _RET_IP_); 251 241 do_raw_read_unlock(lock); ··· 255 243 } 256 244 257 245 static inline void __raw_read_unlock_irq(rwlock_t *lock) 246 + __releases_shared(lock) 258 247 { 259 248 rwlock_release(&lock->dep_map, _RET_IP_); 260 249 do_raw_read_unlock(lock); ··· 264 251 } 265 252 266 253 static inline void __raw_read_unlock_bh(rwlock_t *lock) 254 + __releases_shared(lock) 267 255 { 268 256 rwlock_release(&lock->dep_map, _RET_IP_); 269 257 do_raw_read_unlock(lock); ··· 273 259 274 260 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, 275 261 unsigned long flags) 262 + __releases(lock) 276 263 { 277 264 rwlock_release(&lock->dep_map, _RET_IP_); 278 265 do_raw_write_unlock(lock); ··· 282 267 } 283 268 284 269 static inline void __raw_write_unlock_irq(rwlock_t *lock) 270 + __releases(lock) 285 271 { 286 272 rwlock_release(&lock->dep_map, _RET_IP_); 287 273 do_raw_write_unlock(lock); ··· 291 275 } 292 276 293 277 static inline void __raw_write_unlock_bh(rwlock_t *lock) 278 + __releases(lock) 294 279 { 295 280 rwlock_release(&lock->dep_map, _RET_IP_); 296 281 do_raw_write_unlock(lock);
+24 -11
include/linux/rwlock_rt.h
··· 22 22 \ 23 23 init_rwbase_rt(&(rwl)->rwbase); \ 24 24 __rt_rwlock_init(rwl, #rwl, &__key); \ 25 + __assume_ctx_lock(rwl); \ 25 26 } while (0) 26 27 27 - extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock); 28 + extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock); 28 29 extern int rt_read_trylock(rwlock_t *rwlock); 29 - extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock); 30 + extern void rt_read_unlock(rwlock_t *rwlock) __releases_shared(rwlock); 30 31 extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock); 31 32 extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock); 32 33 extern int rt_write_trylock(rwlock_t *rwlock); 33 34 extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock); 34 35 35 36 static __always_inline void read_lock(rwlock_t *rwlock) 37 + __acquires_shared(rwlock) 36 38 { 37 39 rt_read_lock(rwlock); 38 40 } 39 41 40 42 static __always_inline void read_lock_bh(rwlock_t *rwlock) 43 + __acquires_shared(rwlock) 41 44 { 42 45 local_bh_disable(); 43 46 rt_read_lock(rwlock); 44 47 } 45 48 46 49 static __always_inline void read_lock_irq(rwlock_t *rwlock) 50 + __acquires_shared(rwlock) 47 51 { 48 52 rt_read_lock(rwlock); 49 53 } ··· 59 55 flags = 0; \ 60 56 } while (0) 61 57 62 - #define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) 58 + #define read_trylock(lock) __cond_lock_shared(lock, rt_read_trylock(lock)) 63 59 64 60 static __always_inline void read_unlock(rwlock_t *rwlock) 61 + __releases_shared(rwlock) 65 62 { 66 63 rt_read_unlock(rwlock); 67 64 } 68 65 69 66 static __always_inline void read_unlock_bh(rwlock_t *rwlock) 67 + __releases_shared(rwlock) 70 68 { 71 69 rt_read_unlock(rwlock); 72 70 local_bh_enable(); 73 71 } 74 72 75 73 static __always_inline void read_unlock_irq(rwlock_t *rwlock) 74 + __releases_shared(rwlock) 76 75 { 77 76 rt_read_unlock(rwlock); 78 77 } 79 78 80 79 static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock, 81 
80 unsigned long flags) 81 + __releases_shared(rwlock) 82 82 { 83 83 rt_read_unlock(rwlock); 84 84 } 85 85 86 86 static __always_inline void write_lock(rwlock_t *rwlock) 87 + __acquires(rwlock) 87 88 { 88 89 rt_write_lock(rwlock); 89 90 } 90 91 91 92 #ifdef CONFIG_DEBUG_LOCK_ALLOC 92 93 static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass) 94 + __acquires(rwlock) 93 95 { 94 96 rt_write_lock_nested(rwlock, subclass); 95 97 } ··· 104 94 #endif 105 95 106 96 static __always_inline void write_lock_bh(rwlock_t *rwlock) 97 + __acquires(rwlock) 107 98 { 108 99 local_bh_disable(); 109 100 rt_write_lock(rwlock); 110 101 } 111 102 112 103 static __always_inline void write_lock_irq(rwlock_t *rwlock) 104 + __acquires(rwlock) 113 105 { 114 106 rt_write_lock(rwlock); 115 107 } ··· 126 114 #define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) 127 115 128 116 #define write_trylock_irqsave(lock, flags) \ 129 - ({ \ 130 - int __locked; \ 131 - \ 132 - typecheck(unsigned long, flags); \ 133 - flags = 0; \ 134 - __locked = write_trylock(lock); \ 135 - __locked; \ 136 - }) 117 + __cond_lock(lock, ({ \ 118 + typecheck(unsigned long, flags); \ 119 + flags = 0; \ 120 + rt_write_trylock(lock); \ 121 + })) 137 122 138 123 static __always_inline void write_unlock(rwlock_t *rwlock) 124 + __releases(rwlock) 139 125 { 140 126 rt_write_unlock(rwlock); 141 127 } 142 128 143 129 static __always_inline void write_unlock_bh(rwlock_t *rwlock) 130 + __releases(rwlock) 144 131 { 145 132 rt_write_unlock(rwlock); 146 133 local_bh_enable(); 147 134 } 148 135 149 136 static __always_inline void write_unlock_irq(rwlock_t *rwlock) 137 + __releases(rwlock) 150 138 { 151 139 rt_write_unlock(rwlock); 152 140 } 153 141 154 142 static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock, 155 143 unsigned long flags) 144 + __releases(rwlock) 156 145 { 157 146 rt_write_unlock(rwlock); 158 147 }
+6 -4
include/linux/rwlock_types.h
··· 22 22 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar 23 23 * Released under the General Public License (GPL). 24 24 */ 25 - typedef struct { 25 + context_lock_struct(rwlock) { 26 26 arch_rwlock_t raw_lock; 27 27 #ifdef CONFIG_DEBUG_SPINLOCK 28 28 unsigned int magic, owner_cpu; ··· 31 31 #ifdef CONFIG_DEBUG_LOCK_ALLOC 32 32 struct lockdep_map dep_map; 33 33 #endif 34 - } rwlock_t; 34 + }; 35 + typedef struct rwlock rwlock_t; 35 36 36 37 #define RWLOCK_MAGIC 0xdeaf1eed 37 38 ··· 55 54 56 55 #include <linux/rwbase_rt.h> 57 56 58 - typedef struct { 57 + context_lock_struct(rwlock) { 59 58 struct rwbase_rt rwbase; 60 59 atomic_t readers; 61 60 #ifdef CONFIG_DEBUG_LOCK_ALLOC 62 61 struct lockdep_map dep_map; 63 62 #endif 64 - } rwlock_t; 63 + }; 64 + typedef struct rwlock rwlock_t; 65 65 66 66 #define __RWLOCK_RT_INITIALIZER(name) \ 67 67 { \
+77 -16
include/linux/spinlock.h
··· 106 106 static struct lock_class_key __key; \ 107 107 \ 108 108 __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ 109 + __assume_ctx_lock(lock); \ 109 110 } while (0) 110 111 111 112 #else 112 113 # define raw_spin_lock_init(lock) \ 113 - do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) 114 + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0) 114 115 #endif 115 116 116 117 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) ··· 287 286 #define raw_spin_trylock_bh(lock) \ 288 287 __cond_lock(lock, _raw_spin_trylock_bh(lock)) 289 288 290 - #define raw_spin_trylock_irq(lock) \ 291 - ({ \ 292 - local_irq_disable(); \ 293 - raw_spin_trylock(lock) ? \ 294 - 1 : ({ local_irq_enable(); 0; }); \ 295 - }) 289 + #define raw_spin_trylock_irq(lock) \ 290 + __cond_lock(lock, ({ \ 291 + local_irq_disable(); \ 292 + _raw_spin_trylock(lock) ? \ 293 + 1 : ({ local_irq_enable(); 0; }); \ 294 + })) 296 295 297 - #define raw_spin_trylock_irqsave(lock, flags) \ 298 - ({ \ 299 - local_irq_save(flags); \ 300 - raw_spin_trylock(lock) ? \ 301 - 1 : ({ local_irq_restore(flags); 0; }); \ 302 - }) 296 + #define raw_spin_trylock_irqsave(lock, flags) \ 297 + __cond_lock(lock, ({ \ 298 + local_irq_save(flags); \ 299 + _raw_spin_trylock(lock) ? 
\ 300 + 1 : ({ local_irq_restore(flags); 0; }); \ 301 + })) 303 302 304 303 #ifndef CONFIG_PREEMPT_RT 305 304 /* Include rwlock functions for !RT */ ··· 335 334 \ 336 335 __raw_spin_lock_init(spinlock_check(lock), \ 337 336 #lock, &__key, LD_WAIT_CONFIG); \ 337 + __assume_ctx_lock(lock); \ 338 338 } while (0) 339 339 340 340 #else ··· 344 342 do { \ 345 343 spinlock_check(_lock); \ 346 344 *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \ 345 + __assume_ctx_lock(_lock); \ 347 346 } while (0) 348 347 349 348 #endif 350 349 351 350 static __always_inline void spin_lock(spinlock_t *lock) 351 + __acquires(lock) __no_context_analysis 352 352 { 353 353 raw_spin_lock(&lock->rlock); 354 354 } 355 355 356 356 static __always_inline void spin_lock_bh(spinlock_t *lock) 357 + __acquires(lock) __no_context_analysis 357 358 { 358 359 raw_spin_lock_bh(&lock->rlock); 359 360 } 360 361 361 362 static __always_inline int spin_trylock(spinlock_t *lock) 363 + __cond_acquires(lock) __no_context_analysis 362 364 { 363 365 return raw_spin_trylock(&lock->rlock); 364 366 } ··· 370 364 #define spin_lock_nested(lock, subclass) \ 371 365 do { \ 372 366 raw_spin_lock_nested(spinlock_check(lock), subclass); \ 367 + __release(spinlock_check(lock)); __acquire(lock); \ 373 368 } while (0) 374 369 375 370 #define spin_lock_nest_lock(lock, nest_lock) \ 376 371 do { \ 377 372 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 373 + __release(spinlock_check(lock)); __acquire(lock); \ 378 374 } while (0) 379 375 380 376 static __always_inline void spin_lock_irq(spinlock_t *lock) 377 + __acquires(lock) __no_context_analysis 381 378 { 382 379 raw_spin_lock_irq(&lock->rlock); 383 380 } ··· 388 379 #define spin_lock_irqsave(lock, flags) \ 389 380 do { \ 390 381 raw_spin_lock_irqsave(spinlock_check(lock), flags); \ 382 + __release(spinlock_check(lock)); __acquire(lock); \ 391 383 } while (0) 392 384 393 385 #define spin_lock_irqsave_nested(lock, flags, subclass) \ 394 386 do { \ 395 387 
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ 388 + __release(spinlock_check(lock)); __acquire(lock); \ 396 389 } while (0) 397 390 398 391 static __always_inline void spin_unlock(spinlock_t *lock) 392 + __releases(lock) __no_context_analysis 399 393 { 400 394 raw_spin_unlock(&lock->rlock); 401 395 } 402 396 403 397 static __always_inline void spin_unlock_bh(spinlock_t *lock) 398 + __releases(lock) __no_context_analysis 404 399 { 405 400 raw_spin_unlock_bh(&lock->rlock); 406 401 } 407 402 408 403 static __always_inline void spin_unlock_irq(spinlock_t *lock) 404 + __releases(lock) __no_context_analysis 409 405 { 410 406 raw_spin_unlock_irq(&lock->rlock); 411 407 } 412 408 413 409 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 410 + __releases(lock) __no_context_analysis 414 411 { 415 412 raw_spin_unlock_irqrestore(&lock->rlock, flags); 416 413 } 417 414 418 415 static __always_inline int spin_trylock_bh(spinlock_t *lock) 416 + __cond_acquires(lock) __no_context_analysis 419 417 { 420 418 return raw_spin_trylock_bh(&lock->rlock); 421 419 } 422 420 423 421 static __always_inline int spin_trylock_irq(spinlock_t *lock) 422 + __cond_acquires(lock) __no_context_analysis 424 423 { 425 424 return raw_spin_trylock_irq(&lock->rlock); 426 425 } 427 426 428 427 #define spin_trylock_irqsave(lock, flags) \ 429 - ({ \ 430 - raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ 431 - }) 428 + __cond_lock(lock, raw_spin_trylock_irqsave(spinlock_check(lock), flags)) 432 429 433 430 /** 434 431 * spin_is_locked() - Check whether a spinlock is locked. 
··· 550 535 DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t, 551 536 raw_spin_lock(_T->lock), 552 537 raw_spin_unlock(_T->lock)) 538 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 539 + #define class_raw_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T) 553 540 554 541 DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock)) 542 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 543 + #define class_raw_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T) 555 544 556 545 DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t, 557 546 raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING), 558 547 raw_spin_unlock(_T->lock)) 548 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 549 + #define class_raw_spinlock_nested_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T) 559 550 560 551 DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, 561 552 raw_spin_lock_irq(_T->lock), 562 553 raw_spin_unlock_irq(_T->lock)) 554 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 555 + #define class_raw_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T) 563 556 564 557 DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) 558 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 559 + #define class_raw_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T) 565 560 566 561 DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, 567 562 raw_spin_lock_bh(_T->lock), 568 563 raw_spin_unlock_bh(_T->lock)) 564 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 565 + #define class_raw_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T) 569 
566 570 567 DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) 568 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 569 + #define class_raw_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T) 571 570 572 571 DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, 573 572 raw_spin_lock_irqsave(_T->lock, _T->flags), 574 573 raw_spin_unlock_irqrestore(_T->lock, _T->flags), 575 574 unsigned long flags) 575 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 576 + #define class_raw_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T) 576 577 577 578 DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try, 578 579 raw_spin_trylock_irqsave(_T->lock, _T->flags)) 580 + DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T)) 581 + #define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T) 579 582 580 583 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t, 581 584 spin_lock(_T->lock), 582 585 spin_unlock(_T->lock)) 586 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T)) 587 + #define class_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock, _T) 583 588 584 589 DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock)) 590 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T)) 591 + #define class_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T) 585 592 586 593 DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, 587 594 spin_lock_irq(_T->lock), 588 595 spin_unlock_irq(_T->lock)) 596 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T)) 597 + #define class_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T) 589 598 590 599 
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, 591 600 spin_trylock_irq(_T->lock)) 601 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T)) 602 + #define class_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T) 592 603 593 604 DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t, 594 605 spin_lock_bh(_T->lock), 595 606 spin_unlock_bh(_T->lock)) 607 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T)) 608 + #define class_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T) 596 609 597 610 DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try, 598 611 spin_trylock_bh(_T->lock)) 612 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T)) 613 + #define class_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T) 599 614 600 615 DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, 601 616 spin_lock_irqsave(_T->lock, _T->flags), 602 617 spin_unlock_irqrestore(_T->lock, _T->flags), 603 618 unsigned long flags) 619 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T)) 620 + #define class_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T) 604 621 605 622 DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try, 606 623 spin_trylock_irqsave(_T->lock, _T->flags)) 624 + DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T)) 625 + #define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T) 607 626 608 627 DEFINE_LOCK_GUARD_1(read_lock, rwlock_t, 609 628 read_lock(_T->lock), 610 629 read_unlock(_T->lock)) 630 + DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T)) 631 + #define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T) 611 632 612 633 DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t, 613 634 read_lock_irq(_T->lock), 614 635 
read_unlock_irq(_T->lock)) 636 + DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T)) 637 + #define class_read_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T) 615 638 616 639 DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t, 617 640 read_lock_irqsave(_T->lock, _T->flags), 618 641 read_unlock_irqrestore(_T->lock, _T->flags), 619 642 unsigned long flags) 643 + DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T)) 644 + #define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T) 620 645 621 646 DEFINE_LOCK_GUARD_1(write_lock, rwlock_t, 622 647 write_lock(_T->lock), 623 648 write_unlock(_T->lock)) 649 + DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T)) 650 + #define class_write_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock, _T) 624 651 625 652 DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t, 626 653 write_lock_irq(_T->lock), 627 654 write_unlock_irq(_T->lock)) 655 + DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T)) 656 + #define class_write_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T) 628 657 629 658 DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t, 630 659 write_lock_irqsave(_T->lock, _T->flags), 631 660 write_unlock_irqrestore(_T->lock, _T->flags), 632 661 unsigned long flags) 662 + DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T)) 663 + #define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T) 633 664 634 665 #undef __LINUX_INSIDE_SPINLOCK_H 635 666 #endif /* __LINUX_SPINLOCK_H */
+12 -2
include/linux/spinlock_api_smp.h
··· 34 34 unsigned long __lockfunc 35 35 _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) 36 36 __acquires(lock); 37 - int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); 38 - int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); 37 + int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(lock); 38 + int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) __cond_acquires(lock); 39 39 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); 40 40 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); 41 41 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); ··· 84 84 #endif 85 85 86 86 static inline int __raw_spin_trylock(raw_spinlock_t *lock) 87 + __cond_acquires(lock) 87 88 { 88 89 preempt_disable(); 89 90 if (do_raw_spin_trylock(lock)) { ··· 103 102 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 104 103 105 104 static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) 105 + __acquires(lock) __no_context_analysis 106 106 { 107 107 unsigned long flags; 108 108 ··· 115 113 } 116 114 117 115 static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) 116 + __acquires(lock) __no_context_analysis 118 117 { 119 118 local_irq_disable(); 120 119 preempt_disable(); ··· 124 121 } 125 122 126 123 static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) 124 + __acquires(lock) __no_context_analysis 127 125 { 128 126 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 129 127 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ··· 132 128 } 133 129 134 130 static inline void __raw_spin_lock(raw_spinlock_t *lock) 131 + __acquires(lock) __no_context_analysis 135 132 { 136 133 preempt_disable(); 137 134 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ··· 142 137 #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ 143 138 144 139 static inline void __raw_spin_unlock(raw_spinlock_t *lock) 140 + __releases(lock) 145 141 { 
146 142 spin_release(&lock->dep_map, _RET_IP_); 147 143 do_raw_spin_unlock(lock); ··· 151 145 152 146 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, 153 147 unsigned long flags) 148 + __releases(lock) 154 149 { 155 150 spin_release(&lock->dep_map, _RET_IP_); 156 151 do_raw_spin_unlock(lock); ··· 160 153 } 161 154 162 155 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) 156 + __releases(lock) 163 157 { 164 158 spin_release(&lock->dep_map, _RET_IP_); 165 159 do_raw_spin_unlock(lock); ··· 169 161 } 170 162 171 163 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) 164 + __releases(lock) 172 165 { 173 166 spin_release(&lock->dep_map, _RET_IP_); 174 167 do_raw_spin_unlock(lock); ··· 177 168 } 178 169 179 170 static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) 171 + __cond_acquires(lock) 180 172 { 181 173 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 182 174 if (do_raw_spin_trylock(lock)) {
+40 -31
include/linux/spinlock_api_up.h
··· 24 24 * flags straight, to suppress compiler warnings of unused lock 25 25 * variables, and to add the proper checker annotations: 26 26 */ 27 - #define ___LOCK(lock) \ 28 - do { __acquire(lock); (void)(lock); } while (0) 27 + #define ___LOCK_void(lock) \ 28 + do { (void)(lock); } while (0) 29 29 30 - #define __LOCK(lock) \ 31 - do { preempt_disable(); ___LOCK(lock); } while (0) 30 + #define ___LOCK_(lock) \ 31 + do { __acquire(lock); ___LOCK_void(lock); } while (0) 32 32 33 - #define __LOCK_BH(lock) \ 34 - do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) 33 + #define ___LOCK_shared(lock) \ 34 + do { __acquire_shared(lock); ___LOCK_void(lock); } while (0) 35 35 36 - #define __LOCK_IRQ(lock) \ 37 - do { local_irq_disable(); __LOCK(lock); } while (0) 36 + #define __LOCK(lock, ...) \ 37 + do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0) 38 38 39 - #define __LOCK_IRQSAVE(lock, flags) \ 40 - do { local_irq_save(flags); __LOCK(lock); } while (0) 39 + #define __LOCK_BH(lock, ...) \ 40 + do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0) 41 41 42 - #define ___UNLOCK(lock) \ 42 + #define __LOCK_IRQ(lock, ...) \ 43 + do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0) 44 + 45 + #define __LOCK_IRQSAVE(lock, flags, ...) \ 46 + do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0) 47 + 48 + #define ___UNLOCK_(lock) \ 43 49 do { __release(lock); (void)(lock); } while (0) 44 50 45 - #define __UNLOCK(lock) \ 46 - do { preempt_enable(); ___UNLOCK(lock); } while (0) 51 + #define ___UNLOCK_shared(lock) \ 52 + do { __release_shared(lock); (void)(lock); } while (0) 47 53 48 - #define __UNLOCK_BH(lock) \ 54 + #define __UNLOCK(lock, ...) \ 55 + do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0) 56 + 57 + #define __UNLOCK_BH(lock, ...) 
\ 49 58 do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ 50 - ___UNLOCK(lock); } while (0) 59 + ___UNLOCK_##__VA_ARGS__(lock); } while (0) 51 60 52 - #define __UNLOCK_IRQ(lock) \ 53 - do { local_irq_enable(); __UNLOCK(lock); } while (0) 61 + #define __UNLOCK_IRQ(lock, ...) \ 62 + do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0) 54 63 55 - #define __UNLOCK_IRQRESTORE(lock, flags) \ 56 - do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 64 + #define __UNLOCK_IRQRESTORE(lock, flags, ...) \ 65 + do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0) 57 66 58 67 #define _raw_spin_lock(lock) __LOCK(lock) 59 68 #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 60 - #define _raw_read_lock(lock) __LOCK(lock) 69 + #define _raw_read_lock(lock) __LOCK(lock, shared) 61 70 #define _raw_write_lock(lock) __LOCK(lock) 62 71 #define _raw_write_lock_nested(lock, subclass) __LOCK(lock) 63 72 #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 64 - #define _raw_read_lock_bh(lock) __LOCK_BH(lock) 73 + #define _raw_read_lock_bh(lock) __LOCK_BH(lock, shared) 65 74 #define _raw_write_lock_bh(lock) __LOCK_BH(lock) 66 75 #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) 67 - #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) 76 + #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock, shared) 68 77 #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) 69 78 #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 70 - #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 79 + #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags, shared) 71 80 #define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 72 - #define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) 73 - #define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) 74 - #define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) 75 - #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 81 + #define 
_raw_spin_trylock(lock) ({ __LOCK(lock, void); 1; }) 82 + #define _raw_read_trylock(lock) ({ __LOCK(lock, void); 1; }) 83 + #define _raw_write_trylock(lock) ({ __LOCK(lock, void); 1; }) 84 + #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock, void); 1; }) 76 85 #define _raw_spin_unlock(lock) __UNLOCK(lock) 77 - #define _raw_read_unlock(lock) __UNLOCK(lock) 86 + #define _raw_read_unlock(lock) __UNLOCK(lock, shared) 78 87 #define _raw_write_unlock(lock) __UNLOCK(lock) 79 88 #define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) 80 89 #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) 81 - #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) 90 + #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock, shared) 82 91 #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 83 - #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) 92 + #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock, shared) 84 93 #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) 85 94 #define _raw_spin_unlock_irqrestore(lock, flags) \ 86 95 __UNLOCK_IRQRESTORE(lock, flags) 87 96 #define _raw_read_unlock_irqrestore(lock, flags) \ 88 - __UNLOCK_IRQRESTORE(lock, flags) 97 + __UNLOCK_IRQRESTORE(lock, flags, shared) 89 98 #define _raw_write_unlock_irqrestore(lock, flags) \ 90 99 __UNLOCK_IRQRESTORE(lock, flags) 91 100
+13 -8
include/linux/spinlock_rt.h
··· 20 20 do { \ 21 21 rt_mutex_base_init(&(slock)->lock); \ 22 22 __rt_spin_lock_init(slock, name, key, percpu); \ 23 + __assume_ctx_lock(slock); \ 23 24 } while (0) 24 25 25 26 #define _spin_lock_init(slock, percpu) \ ··· 41 40 extern int rt_spin_trylock(spinlock_t *lock); 42 41 43 42 static __always_inline void spin_lock(spinlock_t *lock) 43 + __acquires(lock) 44 44 { 45 45 rt_spin_lock(lock); 46 46 } ··· 84 82 __spin_lock_irqsave_nested(lock, flags, subclass) 85 83 86 84 static __always_inline void spin_lock_bh(spinlock_t *lock) 85 + __acquires(lock) 87 86 { 88 87 /* Investigate: Drop bh when blocking ? */ 89 88 local_bh_disable(); ··· 92 89 } 93 90 94 91 static __always_inline void spin_lock_irq(spinlock_t *lock) 92 + __acquires(lock) 95 93 { 96 94 rt_spin_lock(lock); 97 95 } ··· 105 101 } while (0) 106 102 107 103 static __always_inline void spin_unlock(spinlock_t *lock) 104 + __releases(lock) 108 105 { 109 106 rt_spin_unlock(lock); 110 107 } 111 108 112 109 static __always_inline void spin_unlock_bh(spinlock_t *lock) 110 + __releases(lock) 113 111 { 114 112 rt_spin_unlock(lock); 115 113 local_bh_enable(); 116 114 } 117 115 118 116 static __always_inline void spin_unlock_irq(spinlock_t *lock) 117 + __releases(lock) 119 118 { 120 119 rt_spin_unlock(lock); 121 120 } 122 121 123 122 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, 124 123 unsigned long flags) 124 + __releases(lock) 125 125 { 126 126 rt_spin_unlock(lock); 127 127 } ··· 140 132 __cond_lock(lock, rt_spin_trylock(lock)) 141 133 142 134 #define spin_trylock_irqsave(lock, flags) \ 143 - ({ \ 144 - int __locked; \ 145 - \ 146 - typecheck(unsigned long, flags); \ 147 - flags = 0; \ 148 - __locked = spin_trylock(lock); \ 149 - __locked; \ 150 - }) 135 + __cond_lock(lock, ({ \ 136 + typecheck(unsigned long, flags); \ 137 + flags = 0; \ 138 + rt_spin_trylock(lock); \ 139 + })) 151 140 152 141 #define spin_is_contended(lock) (((void)(lock), 0)) 153 142
+6 -4
include/linux/spinlock_types.h
··· 14 14 #ifndef CONFIG_PREEMPT_RT 15 15 16 16 /* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ 17 - typedef struct spinlock { 17 + context_lock_struct(spinlock) { 18 18 union { 19 19 struct raw_spinlock rlock; 20 20 ··· 26 26 }; 27 27 #endif 28 28 }; 29 - } spinlock_t; 29 + }; 30 + typedef struct spinlock spinlock_t; 30 31 31 32 #define ___SPIN_LOCK_INITIALIZER(lockname) \ 32 33 { \ ··· 48 47 /* PREEMPT_RT kernels map spinlock to rt_mutex */ 49 48 #include <linux/rtmutex.h> 50 49 51 - typedef struct spinlock { 50 + context_lock_struct(spinlock) { 52 51 struct rt_mutex_base lock; 53 52 #ifdef CONFIG_DEBUG_LOCK_ALLOC 54 53 struct lockdep_map dep_map; 55 54 #endif 56 - } spinlock_t; 55 + }; 56 + typedef struct spinlock spinlock_t; 57 57 58 58 #define __SPIN_LOCK_UNLOCKED(name) \ 59 59 { \
+3 -2
include/linux/spinlock_types_raw.h
··· 11 11 12 12 #include <linux/lockdep_types.h> 13 13 14 - typedef struct raw_spinlock { 14 + context_lock_struct(raw_spinlock) { 15 15 arch_spinlock_t raw_lock; 16 16 #ifdef CONFIG_DEBUG_SPINLOCK 17 17 unsigned int magic, owner_cpu; ··· 20 20 #ifdef CONFIG_DEBUG_LOCK_ALLOC 21 21 struct lockdep_map dep_map; 22 22 #endif 23 - } raw_spinlock_t; 23 + }; 24 + typedef struct raw_spinlock raw_spinlock_t; 24 25 25 26 #define SPINLOCK_MAGIC 0xdead4ead 26 27
+128
lib/test_context-analysis.c
··· 5 5 */ 6 6 7 7 #include <linux/build_bug.h> 8 + #include <linux/spinlock.h> 8 9 9 10 /* 10 11 * Test that helper macros work as expected. ··· 17 16 BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */ 18 17 context_unsafe(do { } while (0)); /* works with void statements */ 19 18 } 19 + 20 + #define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op) \ 21 + struct test_##class##_data { \ 22 + type lock; \ 23 + int counter __guarded_by(&lock); \ 24 + int *pointer __pt_guarded_by(&lock); \ 25 + }; \ 26 + static void __used test_##class##_init(struct test_##class##_data *d) \ 27 + { \ 28 + type_init(&d->lock); \ 29 + d->counter = 0; \ 30 + } \ 31 + static void __used test_##class(struct test_##class##_data *d) \ 32 + { \ 33 + unsigned long flags; \ 34 + d->pointer++; \ 35 + type_lock(&d->lock); \ 36 + op(d->counter); \ 37 + op(*d->pointer); \ 38 + type_unlock(&d->lock); \ 39 + type_lock##_irq(&d->lock); \ 40 + op(d->counter); \ 41 + op(*d->pointer); \ 42 + type_unlock##_irq(&d->lock); \ 43 + type_lock##_bh(&d->lock); \ 44 + op(d->counter); \ 45 + op(*d->pointer); \ 46 + type_unlock##_bh(&d->lock); \ 47 + type_lock##_irqsave(&d->lock, flags); \ 48 + op(d->counter); \ 49 + op(*d->pointer); \ 50 + type_unlock##_irqrestore(&d->lock, flags); \ 51 + } \ 52 + static void __used test_##class##_trylock(struct test_##class##_data *d) \ 53 + { \ 54 + if (type_trylock(&d->lock)) { \ 55 + op(d->counter); \ 56 + type_unlock(&d->lock); \ 57 + } \ 58 + } \ 59 + static void __used test_##class##_assert(struct test_##class##_data *d) \ 60 + { \ 61 + lockdep_assert_held(&d->lock); \ 62 + op(d->counter); \ 63 + } \ 64 + static void __used test_##class##_guard(struct test_##class##_data *d) \ 65 + { \ 66 + { guard(class)(&d->lock); op(d->counter); } \ 67 + { guard(class##_irq)(&d->lock); op(d->counter); } \ 68 + { guard(class##_irqsave)(&d->lock); op(d->counter); } \ 69 + } 70 + 71 + #define TEST_OP_RW(x) (x)++ 72 + #define 
TEST_OP_RO(x) ((void)(x)) 73 + 74 + TEST_SPINLOCK_COMMON(raw_spinlock, 75 + raw_spinlock_t, 76 + raw_spin_lock_init, 77 + raw_spin_lock, 78 + raw_spin_unlock, 79 + raw_spin_trylock, 80 + TEST_OP_RW); 81 + static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d) 82 + { 83 + unsigned long flags; 84 + 85 + if (raw_spin_trylock_irq(&d->lock)) { 86 + d->counter++; 87 + raw_spin_unlock_irq(&d->lock); 88 + } 89 + if (raw_spin_trylock_irqsave(&d->lock, flags)) { 90 + d->counter++; 91 + raw_spin_unlock_irqrestore(&d->lock, flags); 92 + } 93 + scoped_cond_guard(raw_spinlock_try, return, &d->lock) { 94 + d->counter++; 95 + } 96 + } 97 + 98 + TEST_SPINLOCK_COMMON(spinlock, 99 + spinlock_t, 100 + spin_lock_init, 101 + spin_lock, 102 + spin_unlock, 103 + spin_trylock, 104 + TEST_OP_RW); 105 + static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d) 106 + { 107 + unsigned long flags; 108 + 109 + if (spin_trylock_irq(&d->lock)) { 110 + d->counter++; 111 + spin_unlock_irq(&d->lock); 112 + } 113 + if (spin_trylock_irqsave(&d->lock, flags)) { 114 + d->counter++; 115 + spin_unlock_irqrestore(&d->lock, flags); 116 + } 117 + scoped_cond_guard(spinlock_try, return, &d->lock) { 118 + d->counter++; 119 + } 120 + } 121 + 122 + TEST_SPINLOCK_COMMON(write_lock, 123 + rwlock_t, 124 + rwlock_init, 125 + write_lock, 126 + write_unlock, 127 + write_trylock, 128 + TEST_OP_RW); 129 + static void __used test_write_trylock_extra(struct test_write_lock_data *d) 130 + { 131 + unsigned long flags; 132 + 133 + if (write_trylock_irqsave(&d->lock, flags)) { 134 + d->counter++; 135 + write_unlock_irqrestore(&d->lock, flags); 136 + } 137 + } 138 + 139 + TEST_SPINLOCK_COMMON(read_lock, 140 + rwlock_t, 141 + rwlock_init, 142 + read_lock, 143 + read_unlock, 144 + read_trylock, 145 + TEST_OP_RO);