Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

srcu: Support Clang's context analysis

Add support for Clang's context analysis for SRCU.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://patch.msgid.link/20251219154418.3592607-16-elver@google.com

authored by

Marco Elver and committed by
Peter Zijlstra
f0b7ce22 fe00f6e8

+91 -25
+1 -1
Documentation/dev-tools/context-analysis.rst
··· 80 80 81 81 Currently the following synchronization primitives are supported: 82 82 `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`, 83 - `bit_spinlock`, RCU. 83 + `bit_spinlock`, RCU, SRCU (`srcu_struct`). 84 84 85 85 For context locks with an initialization function (e.g., `spin_lock_init()`), 86 86 calling this function before initializing any guarded members or globals
+50 -23
include/linux/srcu.h
··· 21 21 #include <linux/workqueue.h> 22 22 #include <linux/rcu_segcblist.h> 23 23 24 - struct srcu_struct; 24 + context_lock_struct(srcu_struct, __reentrant_ctx_lock); 25 25 26 26 #ifdef CONFIG_DEBUG_LOCK_ALLOC 27 27 ··· 77 77 #define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN) 78 78 // Flavors requiring synchronize_rcu() 79 79 // instead of smp_mb(). 80 - void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); 80 + void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp); 81 81 82 82 #ifdef CONFIG_TINY_SRCU 83 83 #include <linux/srcutiny.h> ··· 131 131 } 132 132 133 133 #ifdef CONFIG_NEED_SRCU_NMI_SAFE 134 - int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp); 135 - void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp); 134 + int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp); 135 + void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp); 136 136 #else 137 137 static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) 138 + __acquires_shared(ssp) 138 139 { 139 140 return __srcu_read_lock(ssp); 140 141 } 141 142 static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) 143 + __releases_shared(ssp) 142 144 { 143 145 __srcu_read_unlock(ssp, idx); 144 146 } ··· 212 210 213 211 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 214 212 213 + /* 214 + * No-op helper to denote that ssp must be held. Because SRCU-protected pointers 215 + * should still be marked with __rcu_guarded, and we do not want to mark them 216 + * with __guarded_by(ssp) as it would complicate annotations for writers, we 217 + * choose the following strategy: srcu_dereference_check() calls this helper 218 + * that checks that the passed ssp is held, and then fake-acquires 'RCU'. 
219 + */ 220 + static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { } 215 221 216 222 /** 217 223 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing ··· 233 223 * to 1. The @c argument will normally be a logical expression containing 234 224 * lockdep_is_held() calls. 235 225 */ 236 - #define srcu_dereference_check(p, ssp, c) \ 237 - __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ 238 - (c) || srcu_read_lock_held(ssp), __rcu) 226 + #define srcu_dereference_check(p, ssp, c) \ 227 + ({ \ 228 + __srcu_read_lock_must_hold(ssp); \ 229 + __acquire_shared_ctx_lock(RCU); \ 230 + __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ 231 + (c) || srcu_read_lock_held(ssp), __rcu); \ 232 + __release_shared_ctx_lock(RCU); \ 233 + __v; \ 234 + }) 239 235 240 236 /** 241 237 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing ··· 284 268 * invoke srcu_read_unlock() from one task and the matching srcu_read_lock() 285 269 * from another. 286 270 */ 287 - static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) 271 + static inline int srcu_read_lock(struct srcu_struct *ssp) 272 + __acquires_shared(ssp) 288 273 { 289 274 int retval; 290 275 ··· 321 304 * contexts where RCU is watching, that is, from contexts where it would 322 305 * be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain. 323 306 */ 324 - static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp) 307 + static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires_shared(ssp) 308 + __acquires_shared(ssp) 325 309 { 326 310 struct srcu_ctr __percpu *retval; 327 311 ··· 362 344 * complain. 
363 345 */ 364 346 static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp) 365 - __acquires(ssp) 347 + __acquires_shared(ssp) 366 348 { 367 349 struct srcu_ctr __percpu *retval; 368 350 ··· 378 360 * See srcu_read_lock_fast() for more information. 379 361 */ 380 362 static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp) 381 - __acquires(ssp) 363 + __acquires_shared(ssp) 382 364 { 383 365 struct srcu_ctr __percpu *retval; 384 366 ··· 399 381 * and srcu_read_lock_fast(). However, the same definition/initialization 400 382 * requirements called out for srcu_read_lock_safe() apply. 401 383 */ 402 - static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp) 384 + static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp) 403 385 { 404 386 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); 405 387 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast()."); ··· 418 400 * then none of the other flavors may be used, whether before, during, 419 401 * or after. 420 402 */ 421 - static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp) 403 + static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) 404 + __acquires_shared(ssp) 422 405 { 423 406 int retval; 424 407 ··· 431 412 432 413 /* Used by tracing, cannot be traced and cannot invoke lockdep. */ 433 414 static inline notrace int 434 - srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) 415 + srcu_read_lock_notrace(struct srcu_struct *ssp) 416 + __acquires_shared(ssp) 435 417 { 436 418 int retval; 437 419 ··· 463 443 * which calls to down_read() may be nested. The same srcu_struct may be 464 444 * used concurrently by srcu_down_read() and srcu_read_lock(). 
465 445 */ 466 - static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp) 446 + static inline int srcu_down_read(struct srcu_struct *ssp) 447 + __acquires_shared(ssp) 467 448 { 468 449 WARN_ON_ONCE(in_nmi()); 469 450 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); ··· 479 458 * Exit an SRCU read-side critical section. 480 459 */ 481 460 static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) 482 - __releases(ssp) 461 + __releases_shared(ssp) 483 462 { 484 463 WARN_ON_ONCE(idx & ~0x1); 485 464 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); ··· 495 474 * Exit a light-weight SRCU read-side critical section. 496 475 */ 497 476 static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 498 - __releases(ssp) 477 + __releases_shared(ssp) 499 478 { 500 479 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 501 480 srcu_lock_release(&ssp->dep_map); ··· 511 490 * Exit an SRCU-fast-updown read-side critical section. 512 491 */ 513 492 static inline void 514 - srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp) 493 + srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp) 515 494 { 516 495 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); 517 496 srcu_lock_release(&ssp->dep_map); ··· 525 504 * See srcu_read_unlock_fast() for more information. 526 505 */ 527 506 static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp, 528 - struct srcu_ctr __percpu *scp) __releases(ssp) 507 + struct srcu_ctr __percpu *scp) __releases_shared(ssp) 529 508 { 530 509 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 531 510 __srcu_read_unlock_fast(ssp, scp); ··· 540 519 * the same context as the matching srcu_down_read_fast(). 
541 520 */ 542 521 static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 543 - __releases(ssp) 522 + __releases_shared(ssp) 544 523 { 545 524 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); 546 525 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); ··· 556 535 * Exit an SRCU read-side critical section, but in an NMI-safe manner. 557 536 */ 558 537 static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) 559 - __releases(ssp) 538 + __releases_shared(ssp) 560 539 { 561 540 WARN_ON_ONCE(idx & ~0x1); 562 541 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); ··· 566 545 567 546 /* Used by tracing, cannot be traced and cannot call lockdep. */ 568 547 static inline notrace void 569 - srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) 548 + srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp) 570 549 { 571 550 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 572 551 __srcu_read_unlock(ssp, idx); ··· 581 560 * the same context as the matching srcu_down_read(). 
582 561 */ 583 562 static inline void srcu_up_read(struct srcu_struct *ssp, int idx) 584 - __releases(ssp) 563 + __releases_shared(ssp) 585 564 { 586 565 WARN_ON_ONCE(idx & ~0x1); 587 566 WARN_ON_ONCE(in_nmi()); ··· 621 600 _T->idx = srcu_read_lock(_T->lock), 622 601 srcu_read_unlock(_T->lock, _T->idx), 623 602 int idx) 603 + DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 604 + #define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T) 624 605 625 606 DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct, 626 607 _T->scp = srcu_read_lock_fast(_T->lock), 627 608 srcu_read_unlock_fast(_T->lock, _T->scp), 628 609 struct srcu_ctr __percpu *scp) 610 + DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 611 + #define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T) 629 612 630 613 DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct, 631 614 _T->scp = srcu_read_lock_fast_notrace(_T->lock), 632 615 srcu_read_unlock_fast_notrace(_T->lock, _T->scp), 633 616 struct srcu_ctr __percpu *scp) 617 + DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 618 + #define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T) 634 619 635 620 #endif
+6
include/linux/srcutiny.h
··· 73 73 * index that must be passed to the matching srcu_read_unlock(). 74 74 */ 75 75 static inline int __srcu_read_lock(struct srcu_struct *ssp) 76 + __acquires_shared(ssp) 76 77 { 77 78 int idx; 78 79 ··· 81 80 idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; 82 81 WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); 83 82 preempt_enable(); 83 + __acquire_shared(ssp); 84 84 return idx; 85 85 } 86 86 ··· 98 96 } 99 97 100 98 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp) 99 + __acquires_shared(ssp) 101 100 { 102 101 return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp)); 103 102 } 104 103 105 104 static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 105 + __releases_shared(ssp) 106 106 { 107 107 __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp)); 108 108 } 109 109 110 110 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp) 111 + __acquires_shared(ssp) 111 112 { 112 113 return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp)); 113 114 } 114 115 115 116 static inline 116 117 void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 118 + __releases_shared(ssp) 117 119 { 118 120 __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp)); 119 121 }
+9 -1
include/linux/srcutree.h
··· 233 233 #define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \ 234 234 __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static) 235 235 236 - int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); 236 + int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp); 237 237 void synchronize_srcu_expedited(struct srcu_struct *ssp); 238 238 void srcu_barrier(struct srcu_struct *ssp); 239 239 void srcu_expedite_current(struct srcu_struct *ssp); ··· 286 286 * implementations of this_cpu_inc(). 287 287 */ 288 288 static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp) 289 + __acquires_shared(ssp) 289 290 { 290 291 struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp); 291 292 ··· 295 294 else 296 295 atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader. 297 296 barrier(); /* Avoid leaking the critical section. */ 297 + __acquire_shared(ssp); 298 298 return scp; 299 299 } 300 300 ··· 310 308 */ 311 309 static inline void notrace 312 310 __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 311 + __releases_shared(ssp) 313 312 { 313 + __release_shared(ssp); 314 314 barrier(); /* Avoid leaking the critical section. */ 315 315 if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE)) 316 316 this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader. ··· 330 326 */ 331 327 static inline 332 328 struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp) 329 + __acquires_shared(ssp) 333 330 { 334 331 struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp); 335 332 ··· 339 334 else 340 335 atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader. 341 336 barrier(); /* Avoid leaking the critical section. 
*/ 337 + __acquire_shared(ssp); 342 338 return scp; 343 339 } 344 340 ··· 354 348 */ 355 349 static inline void notrace 356 350 __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 351 + __releases_shared(ssp) 357 352 { 353 + __release_shared(ssp); 358 354 barrier(); /* Avoid leaking the critical section. */ 359 355 if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE)) 360 356 this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+25
lib/test_context-analysis.c
··· 10 10 #include <linux/rcupdate.h> 11 11 #include <linux/seqlock.h> 12 12 #include <linux/spinlock.h> 13 + #include <linux/srcu.h> 13 14 14 15 /* 15 16 * Test that helper macros work as expected. ··· 369 368 370 369 lockdep_assert_in_rcu_read_lock_sched(); 371 370 wants_rcu_held_sched(); 371 + } 372 + 373 + struct test_srcu_data { 374 + struct srcu_struct srcu; 375 + long __rcu_guarded *data; 376 + }; 377 + 378 + static void __used test_srcu(struct test_srcu_data *d) 379 + { 380 + init_srcu_struct(&d->srcu); 381 + 382 + int idx = srcu_read_lock(&d->srcu); 383 + long *data = srcu_dereference(d->data, &d->srcu); 384 + (void)data; 385 + srcu_read_unlock(&d->srcu, idx); 386 + 387 + rcu_assign_pointer(d->data, NULL); 388 + } 389 + 390 + static void __used test_srcu_guard(struct test_srcu_data *d) 391 + { 392 + { guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); } 393 + { guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); } 394 + { guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); } 372 395 }