Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

rcu: Support Clang's context analysis

Improve the existing annotations to properly support Clang's context
analysis.

The old annotations distinguished between RCU, RCU_BH, and RCU_SCHED.
However, to express "holds the RCU read lock" without caring whether the
normal, _bh(), or _sched() variant was used, the distinction between the
latter variants has to be relaxed: the _bh() and _sched() variants now
also acquire "RCU".
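
With this change, a function that merely requires "some RCU reader" can be
annotated with the shared "RCU" context lock and called under any of the
three variants. A minimal sketch, mirroring the tests added in
lib/test_context-analysis.c below (example_reader() is an illustrative
name):

  /* Requires that some RCU read-side section is active. */
  static void wants_rcu_held(void) __must_hold_shared(RCU) { }

  static void example_reader(void)
  {
          rcu_read_lock_bh();     /* acquires "RCU_BH" and, now, also "RCU" */
          wants_rcu_held();       /* OK: satisfied by any reader variant */
          rcu_read_unlock_bh();
  }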

If and when context locks are introduced to denote more generally that
the "IRQ", "BH", or "PREEMPT" contexts are disabled, it would make sense
to acquire those instead of RCU_BH and RCU_SCHED respectively.

The above change also simplifies introducing __guarded_by support, where
only the "RCU" context lock needs to be held: introduce __rcu_guarded,
with which Clang's context analysis warns if a marked pointer is
dereferenced without any of the RCU locks held, or updated without the
appropriate helpers.
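
A minimal sketch of what the analysis flags, mirroring the new tests in
lib/test_context-analysis.c (reader() is an illustrative name):

  struct test_rcu_data {
          long __rcu_guarded *data;       /* __rcu plus __guarded_by(RCU) */
  };

  static void reader(struct test_rcu_data *d)
  {
          rcu_read_lock();
          (void)rcu_dereference(d->data); /* OK: an RCU reader is active */
          rcu_read_unlock();

          (void)rcu_dereference(d->data); /* warning: no RCU lock held */
  }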

The primitives rcu_assign_pointer() and friends are wrapped with
context_unsafe(), making them the sanctioned way to update RCU-protected
pointers marked with __rcu_guarded.
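
For example (again mirroring the new tests; updater() is an illustrative
name):

  static void updater(struct test_rcu_data *d)
  {
          rcu_assign_pointer(d->data, NULL);  /* OK: wrapped in context_unsafe() */
          RCU_INIT_POINTER(d->data, NULL);    /* OK: likewise */
          /* A plain "d->data = NULL;" would be flagged by the analysis. */
  }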

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://patch.msgid.link/20251219154418.3592607-15-elver@google.com

Authored by Marco Elver and committed by Peter Zijlstra
fe00f6e8 eb7d96a1

+139 -25 (total)

Documentation/dev-tools/context-analysis.rst (+1 -1)
···
 
 Currently the following synchronization primitives are supported:
 `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
-`bit_spinlock`.
+`bit_spinlock`, RCU.
 
 For context locks with an initialization function (e.g., `spin_lock_init()`),
 calling this function before initializing any guarded members or globals
include/linux/rcupdate.h (+53 -24)
···
 #include <asm/processor.h>
 #include <linux/context_tracking_irq.h>
 
+token_context_lock(RCU, __reentrant_ctx_lock);
+token_context_lock_instance(RCU, RCU_SCHED);
+token_context_lock_instance(RCU, RCU_BH);
+
+/*
+ * A convenience macro that can be used for RCU-protected globals or struct
+ * members; adds type qualifier __rcu, and also enforces __guarded_by(RCU).
+ */
+#define __rcu_guarded __rcu __guarded_by(RCU)
+
 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
···
 
 // See RCU_LOCKDEP_WARN() for an explanation of the double call to
 // debug_lockdep_rcu_enabled().
-static inline bool lockdep_assert_rcu_helper(bool c)
+static inline bool lockdep_assert_rcu_helper(bool c, const struct __ctx_lock_RCU *ctx)
+	__assumes_shared_ctx_lock(RCU) __assumes_shared_ctx_lock(ctx)
 {
 	return debug_lockdep_rcu_enabled() &&
 	       (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
···
  * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
  */
 #define lockdep_assert_in_rcu_read_lock() \
-	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map), RCU))
 
 /**
  * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
···
  * actual rcu_read_lock_bh() is required.
  */
 #define lockdep_assert_in_rcu_read_lock_bh() \
-	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map), RCU_BH))
 
 /**
  * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
···
  * instead an actual rcu_read_lock_sched() is required.
  */
 #define lockdep_assert_in_rcu_read_lock_sched() \
-	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map), RCU_SCHED))
 
 /**
  * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
···
 	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
 					       !lock_is_held(&rcu_bh_lock_map) && \
 					       !lock_is_held(&rcu_sched_lock_map) && \
-					       preemptible()))
+					       preemptible(), RCU))
 
 #else /* #ifdef CONFIG_PROVE_RCU */
 
 #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
 #define rcu_sleep_check() do { } while (0)
 
-#define lockdep_assert_in_rcu_read_lock() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
-#define lockdep_assert_in_rcu_reader() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() __assume_shared_ctx_lock(RCU)
+#define lockdep_assert_in_rcu_read_lock_bh() __assume_shared_ctx_lock(RCU_BH)
+#define lockdep_assert_in_rcu_read_lock_sched() __assume_shared_ctx_lock(RCU_SCHED)
+#define lockdep_assert_in_rcu_reader() __assume_shared_ctx_lock(RCU)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
···
 #endif /* #else #ifdef __CHECKER__ */
 
 #define __unrcu_pointer(p, local) \
-({ \
+context_unsafe( \
 	typeof(*p) *local = (typeof(*p) *__force)(p); \
 	rcu_check_sparse(p, __rcu); \
-	((typeof(*p) __force __kernel *)(local)); \
-})
+	((typeof(*p) __force __kernel *)(local)) \
+)
 /**
  * unrcu_pointer - mark a pointer as not being RCU protected
  * @p: pointer needing to lose its __rcu property
···
  * other macros that it invokes.
  */
 #define rcu_assign_pointer(p, v) \
-do { \
+context_unsafe( \
 	uintptr_t _r_a_p__v = (uintptr_t)(v); \
 	rcu_check_sparse(p, __rcu); \
 	\
···
 		WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
 	else \
 		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-} while (0)
+)
 
 /**
  * rcu_replace_pointer() - replace an RCU pointer, returning its old value
···
  * only when acquiring spinlocks that are subject to priority inheritance.
  */
 static __always_inline void rcu_read_lock(void)
+	__acquires_shared(RCU)
 {
 	__rcu_read_lock();
-	__acquire(RCU);
+	__acquire_shared(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_lock() used illegally while idle");
···
  * See rcu_read_lock() for more information.
  */
 static inline void rcu_read_unlock(void)
+	__releases_shared(RCU)
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
-	__release(RCU);
+	__release_shared(RCU);
 	__rcu_read_unlock();
 }
 
···
  * was invoked from some other task.
  */
 static inline void rcu_read_lock_bh(void)
+	__acquires_shared(RCU) __acquires_shared(RCU_BH)
 {
 	local_bh_disable();
-	__acquire(RCU_BH);
+	__acquire_shared(RCU);
+	__acquire_shared(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_lock_bh() used illegally while idle");
···
  * See rcu_read_lock_bh() for more information.
  */
 static inline void rcu_read_unlock_bh(void)
+	__releases_shared(RCU) __releases_shared(RCU_BH)
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
-	__release(RCU_BH);
+	__release_shared(RCU_BH);
+	__release_shared(RCU);
 	local_bh_enable();
 }
 
···
  * rcu_read_lock_sched() was invoked from an NMI handler.
  */
 static inline void rcu_read_lock_sched(void)
+	__acquires_shared(RCU) __acquires_shared(RCU_SCHED)
 {
 	preempt_disable();
-	__acquire(RCU_SCHED);
+	__acquire_shared(RCU);
+	__acquire_shared(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_lock_sched() used illegally while idle");
···
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
 static inline notrace void rcu_read_lock_sched_notrace(void)
+	__acquires_shared(RCU) __acquires_shared(RCU_SCHED)
 {
 	preempt_disable_notrace();
-	__acquire(RCU_SCHED);
+	__acquire_shared(RCU);
+	__acquire_shared(RCU_SCHED);
 }
 
 /**
···
  * See rcu_read_lock_sched() for more information.
  */
 static inline void rcu_read_unlock_sched(void)
+	__releases_shared(RCU) __releases_shared(RCU_SCHED)
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
-	__release(RCU_SCHED);
+	__release_shared(RCU_SCHED);
+	__release_shared(RCU);
 	preempt_enable();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
 static inline notrace void rcu_read_unlock_sched_notrace(void)
+	__releases_shared(RCU) __releases_shared(RCU_SCHED)
 {
-	__release(RCU_SCHED);
+	__release_shared(RCU_SCHED);
+	__release_shared(RCU);
 	preempt_enable_notrace();
 }
 
 static __always_inline void rcu_read_lock_dont_migrate(void)
+	__acquires_shared(RCU)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
 		migrate_disable();
···
 }
 
 static inline void rcu_read_unlock_migrate(void)
+	__releases_shared(RCU)
 {
 	rcu_read_unlock();
 	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
···
  * ordering guarantees for either the CPU or the compiler.
  */
 #define RCU_INIT_POINTER(p, v) \
-	do { \
+	context_unsafe( \
 		rcu_check_sparse(p, __rcu); \
 		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
-	} while (0)
+	)
 
 /**
  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
···
 		__release(RCU); \
 	} while (0), \
 	rcu_read_unlock())
+
+DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
 
 #endif /* __LINUX_RCUPDATE_H */
lib/test_context-analysis.c (+85)
···
 #include <linux/bit_spinlock.h>
 #include <linux/build_bug.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 #include <linux/seqlock.h>
 #include <linux/spinlock.h>
 
···
 		d->counter++;
 		bit_spin_unlock(3, &d->bits);
 	}
+}
+
+/*
+ * Test that we can mark a variable guarded by RCU, and we can dereference and
+ * write to the pointer with RCU's primitives.
+ */
+struct test_rcu_data {
+	long __rcu_guarded *data;
+};
+
+static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
+{
+	rcu_read_lock();
+	(void)rcu_dereference(d->data);
+	rcu_read_unlock();
+
+	rcu_read_lock_bh();
+	(void)rcu_dereference(d->data);
+	rcu_read_unlock_bh();
+
+	rcu_read_lock_sched();
+	(void)rcu_dereference(d->data);
+	rcu_read_unlock_sched();
+}
+
+static void __used test_rcu_guard(struct test_rcu_data *d)
+{
+	guard(rcu)();
+	(void)rcu_dereference(d->data);
+}
+
+static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
+{
+	rcu_assign_pointer(d->data, NULL);
+	RCU_INIT_POINTER(d->data, NULL);
+	(void)unrcu_pointer(d->data);
+}
+
+static void wants_rcu_held(void) __must_hold_shared(RCU) { }
+static void wants_rcu_held_bh(void) __must_hold_shared(RCU_BH) { }
+static void wants_rcu_held_sched(void) __must_hold_shared(RCU_SCHED) { }
+
+static void __used test_rcu_lock_variants(void)
+{
+	rcu_read_lock();
+	wants_rcu_held();
+	rcu_read_unlock();
+
+	rcu_read_lock_bh();
+	wants_rcu_held_bh();
+	rcu_read_unlock_bh();
+
+	rcu_read_lock_sched();
+	wants_rcu_held_sched();
+	rcu_read_unlock_sched();
+}
+
+static void __used test_rcu_lock_reentrant(void)
+{
+	rcu_read_lock();
+	rcu_read_lock();
+	rcu_read_lock_bh();
+	rcu_read_lock_bh();
+	rcu_read_lock_sched();
+	rcu_read_lock_sched();
+
+	rcu_read_unlock_sched();
+	rcu_read_unlock_sched();
+	rcu_read_unlock_bh();
+	rcu_read_unlock_bh();
+	rcu_read_unlock();
+	rcu_read_unlock();
+}
+
+static void __used test_rcu_assert_variants(void)
+{
+	lockdep_assert_in_rcu_read_lock();
+	wants_rcu_held();
+
+	lockdep_assert_in_rcu_read_lock_bh();
+	wants_rcu_held_bh();
+
+	lockdep_assert_in_rcu_read_lock_sched();
+	wants_rcu_held_sched();
 }