Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

rhashtable: Enable context analysis

Enable context analysis for rhashtable, which served as an initial
test case because it combines RCU, mutex, and bit_spinlock usage.

Users of rhashtable also benefit from annotations on the API, which
now warn if the RCU read lock is not held where required.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-33-elver@google.com
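
As a minimal sketch of the pattern these annotations enforce (the names
below are hypothetical, not part of the patch): rhashtable_lookup() must be
called inside an RCU read-side critical section, and with the API now
annotated, a caller like the following builds cleanly, while dropping the
rcu_read_lock()/rcu_read_unlock() pair would trigger a context-analysis
warning.

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

/* Hypothetical object and table parameters, for illustration only. */
struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct example_obj, key),
	.head_offset = offsetof(struct example_obj, node),
};

static bool example_contains(struct rhashtable *ht, u32 key)
{
	bool found;

	rcu_read_lock();	/* enter RCU read-side critical section */
	found = rhashtable_lookup(ht, &key, example_params) != NULL;
	rcu_read_unlock();	/* leave it; a lookup outside would now warn */

	return found;
}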

authored by Marco Elver and committed by Peter Zijlstra
322366b8 c3d3023f

3 files changed, +18 -5

include/linux/rhashtable.h (+13 -3)
···
 void rhashtable_walk_enter(struct rhashtable *ht,
 			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
 
 static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+	__acquires_shared(RCU)
 {
 	(void)rhashtable_walk_start_check(iter);
 }
 
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
 void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
 
 void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void (*free_fn)(void *ptr, void *arg),
···
 
 static inline unsigned long rht_lock(struct bucket_table *tbl,
 				     struct rhash_lock_head __rcu **bkt)
+	__acquires(__bitlock(0, bkt))
 {
 	unsigned long flags;
 
···
 static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
 					    struct rhash_lock_head __rcu **bucket,
 					    unsigned int subclass)
+	__acquires(__bitlock(0, bucket))
 {
 	unsigned long flags;
 
···
 static inline void rht_unlock(struct bucket_table *tbl,
 			      struct rhash_lock_head __rcu **bkt,
 			      unsigned long flags)
+	__releases(__bitlock(0, bkt))
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
···
 					     struct rhash_lock_head __rcu **bkt,
 					     struct rhash_head *obj,
 					     unsigned long flags)
+	__releases(__bitlock(0, bkt))
 {
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
 	lock_map_release(&tbl->dep_map);
 	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
-	__release(bitlock);
+	__release(__bitlock(0, bkt));
 	local_irq_restore(flags);
 }
···
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params,
 	const enum rht_lookup_freq freq)
+	__must_hold_shared(RCU)
 {
 	struct rhashtable_compare_arg arg = {
 		.ht = ht,
···
 static __always_inline void *rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
 						    RHT_LOOKUP_NORMAL);
···
 static __always_inline void *rhashtable_lookup_likely(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
 						    RHT_LOOKUP_LIKELY);
···
 static __always_inline struct rhlist_head *rhltable_lookup(
 	struct rhltable *hlt, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
 						    RHT_LOOKUP_NORMAL);
···
 static __always_inline struct rhlist_head *rhltable_lookup_likely(
 	struct rhltable *hlt, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
 						    RHT_LOOKUP_LIKELY);
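
The __must_hold_shared(RCU) annotations also compose: a helper that is only
ever called from within an RCU read-side critical section can carry the same
annotation instead of nesting rcu_read_lock() itself, and the analysis then
checks that helper's callers. A hedged sketch, reusing the hypothetical
example_obj/example_params declarations from the earlier example:

/* Hypothetical: caller must already be in an RCU read-side critical section. */
static struct example_obj *example_find_rcu(struct rhashtable *ht, u32 key)
	__must_hold_shared(RCU)
{
	/* OK: the held RCU context satisfies rhashtable_lookup()'s annotation. */
	return rhashtable_lookup(ht, &key, example_params);
}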
lib/Makefile (+2)

···
 lib-y += kobject.o klist.o
 obj-y += lockref.o
 
+CONTEXT_ANALYSIS_rhashtable.o := y
+
 obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 list_sort.o uuid.o iov_iter.o clz_ctz.o \
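
Judging from this hunk, context analysis is opted into per object file. A
hypothetical sketch, assuming the same Kbuild convention holds for other
translation units (foo.o is a made-up name, not part of the patch):

# Hypothetical: opt lib/foo.o in as well, mirroring the rhashtable.o line.
CONTEXT_ANALYSIS_foo.o := y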
lib/rhashtable.c (+3 -2)

···
 static int rhashtable_rehash_alloc(struct rhashtable *ht,
 				   struct bucket_table *old_tbl,
 				   unsigned int size)
+	__must_hold(&ht->mutex)
 {
 	struct bucket_table *new_tbl;
 	int err;
···
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
 static int rhashtable_shrink(struct rhashtable *ht)
+	__must_hold(&ht->mutex)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	unsigned int nelems = atomic_read(&ht->nelems);
···
  * resize events and always continue.
  */
 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
-	__acquires(RCU)
+	__acquires_shared(RCU)
 {
 	struct rhashtable *ht = iter->ht;
 	bool rhlist = ht->rhlist;
···
  * hash table.
  */
 void rhashtable_walk_stop(struct rhashtable_iter *iter)
-	__releases(RCU)
 {
 	struct rhashtable *ht;
 	struct bucket_table *tbl = iter->walker.tbl;
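
For the exclusive case, __must_hold(&ht->mutex) plays the analogous role: a
helper that requires the table mutex documents the expectation and lets the
analysis verify its callers. A minimal hypothetical sketch in the style of
lib/rhashtable.c (the function name is invented for illustration):

/* Hypothetical helper: caller must hold ht->mutex. */
static void example_with_mutex_held(struct rhashtable *ht)
	__must_hold(&ht->mutex)
{
	/* rht_dereference() is valid here because ht->mutex is held. */
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	(void)tbl;	/* ... operate on tbl under the mutex ... */
}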