Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 74cd4e0e5399480e3fab2cd6a6cbdb17f673c335 641 lines 24 kB view raw
1/* SPDX-License-Identifier: GPL-2.0+ */ 2/* 3 * Sleepable Read-Copy Update mechanism for mutual exclusion 4 * 5 * Copyright (C) IBM Corporation, 2006 6 * Copyright (C) Fujitsu, 2012 7 * 8 * Author: Paul McKenney <paulmck@linux.ibm.com> 9 * Lai Jiangshan <laijs@cn.fujitsu.com> 10 * 11 * For detailed explanation of Read-Copy Update mechanism see - 12 * Documentation/RCU/ *.txt 13 * 14 */ 15 16#ifndef _LINUX_SRCU_H 17#define _LINUX_SRCU_H 18 19#include <linux/mutex.h> 20#include <linux/rcupdate.h> 21#include <linux/workqueue.h> 22#include <linux/rcu_segcblist.h> 23 24context_lock_struct(srcu_struct, __reentrant_ctx_lock); 25 26#ifdef CONFIG_DEBUG_LOCK_ALLOC 27 28int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key); 29#ifndef CONFIG_TINY_SRCU 30int __init_srcu_struct_fast(struct srcu_struct *ssp, const char *name, struct lock_class_key *key); 31int __init_srcu_struct_fast_updown(struct srcu_struct *ssp, const char *name, 32 struct lock_class_key *key); 33#endif // #ifndef CONFIG_TINY_SRCU 34 35#define init_srcu_struct(ssp) \ 36({ \ 37 static struct lock_class_key __srcu_key; \ 38 \ 39 __init_srcu_struct((ssp), #ssp, &__srcu_key); \ 40}) 41 42#define init_srcu_struct_fast(ssp) \ 43({ \ 44 static struct lock_class_key __srcu_key; \ 45 \ 46 __init_srcu_struct_fast((ssp), #ssp, &__srcu_key); \ 47}) 48 49#define init_srcu_struct_fast_updown(ssp) \ 50({ \ 51 static struct lock_class_key __srcu_key; \ 52 \ 53 __init_srcu_struct_fast_updown((ssp), #ssp, &__srcu_key); \ 54}) 55 56#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, 57#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 58 59int init_srcu_struct(struct srcu_struct *ssp); 60#ifndef CONFIG_TINY_SRCU 61int init_srcu_struct_fast(struct srcu_struct *ssp); 62int init_srcu_struct_fast_updown(struct srcu_struct *ssp); 63#endif // #ifndef CONFIG_TINY_SRCU 64 65#define __SRCU_DEP_MAP_INIT(srcu_name) 66#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 67 68/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */ 69#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). 70#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). 71// 0x4 // SRCU-lite is no longer with us. 72#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast(). 73#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8 // srcu_read_lock_fast(). 74#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \ 75 SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN) 76 // All of the above. 77#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN) 78 // Flavors requiring synchronize_rcu() 79 // instead of smp_mb(). 80void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp); 81 82#ifdef CONFIG_TINY_SRCU 83#include <linux/srcutiny.h> 84#elif defined(CONFIG_TREE_SRCU) 85#include <linux/srcutree.h> 86#else 87#error "Unknown SRCU implementation specified to kernel configuration" 88#endif 89 90void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, 91 void (*func)(struct rcu_head *head)); 92void cleanup_srcu_struct(struct srcu_struct *ssp); 93void synchronize_srcu(struct srcu_struct *ssp); 94 95#define SRCU_GET_STATE_COMPLETED 0x1 96 97/** 98 * get_completed_synchronize_srcu - Return a pre-completed polled state cookie 99 * 100 * Returns a value that poll_state_synchronize_srcu() will always treat 101 * as a cookie whose grace period has already completed. 102 */ 103static inline unsigned long get_completed_synchronize_srcu(void) 104{ 105 return SRCU_GET_STATE_COMPLETED; 106} 107 108unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); 109unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); 110bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); 111 112// Maximum number of unsigned long values corresponding to 113// not-yet-completed SRCU grace periods. 114#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2 115 116/** 117 * same_state_synchronize_srcu - Are two old-state values identical? 118 * @oldstate1: First old-state value. 119 * @oldstate2: Second old-state value. 120 * 121 * The two old-state values must have been obtained from either 122 * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or 123 * get_completed_synchronize_srcu(). Returns @true if the two values are 124 * identical and @false otherwise. This allows structures whose lifetimes 125 * are tracked by old-state values to push these values to a list header, 126 * allowing those structures to be slightly smaller. 127 */ 128static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2) 129{ 130 return oldstate1 == oldstate2; 131} 132 133#ifdef CONFIG_NEED_SRCU_NMI_SAFE 134int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp); 135void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp); 136#else 137static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) 138 __acquires_shared(ssp) 139{ 140 return __srcu_read_lock(ssp); 141} 142static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) 143 __releases_shared(ssp) 144{ 145 __srcu_read_unlock(ssp, idx); 146} 147#endif /* CONFIG_NEED_SRCU_NMI_SAFE */ 148 149void srcu_init(void); 150 151#ifdef CONFIG_DEBUG_LOCK_ALLOC 152 153/** 154 * srcu_read_lock_held - might we be in SRCU read-side critical section? 155 * @ssp: The srcu_struct structure to check 156 * 157 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU 158 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 159 * this assumes we are in an SRCU read-side critical section unless it can 160 * prove otherwise. 161 * 162 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot 163 * and while lockdep is disabled. 164 * 165 * Note that SRCU is based on its own statemachine and it doesn't 166 * relies on normal RCU, it can be called from the CPU which 167 * is in the idle loop from an RCU point of view or offline. 168 */ 169static inline int srcu_read_lock_held(const struct srcu_struct *ssp) 170{ 171 if (!debug_lockdep_rcu_enabled()) 172 return 1; 173 return lock_is_held(&ssp->dep_map); 174} 175 176/* 177 * Annotations provide deadlock detection for SRCU. 178 * 179 * Similar to other lockdep annotations, except there is an additional 180 * srcu_lock_sync(), which is basically an empty *write*-side critical section, 181 * see lock_sync() for more information. 182 */ 183 184/* Annotates a srcu_read_lock() */ 185static inline void srcu_lock_acquire(struct lockdep_map *map) 186{ 187 lock_map_acquire_read(map); 188} 189 190/* Annotates a srcu_read_lock() */ 191static inline void srcu_lock_release(struct lockdep_map *map) 192{ 193 lock_map_release(map); 194} 195 196/* Annotates a synchronize_srcu() */ 197static inline void srcu_lock_sync(struct lockdep_map *map) 198{ 199 lock_map_sync(map); 200} 201 202#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 203 204static inline int srcu_read_lock_held(const struct srcu_struct *ssp) 205{ 206 return 1; 207} 208 209#define srcu_lock_acquire(m) do { } while (0) 210#define srcu_lock_release(m) do { } while (0) 211#define srcu_lock_sync(m) do { } while (0) 212 213#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 214 215/* 216 * No-op helper to denote that ssp must be held. Because SRCU-protected pointers 217 * should still be marked with __rcu_guarded, and we do not want to mark them 218 * with __guarded_by(ssp) as it would complicate annotations for writers, we 219 * choose the following strategy: srcu_dereference_check() calls this helper 220 * that checks that the passed ssp is held, and then fake-acquires 'RCU'. 221 */ 222static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { } 223 224/** 225 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing 226 * @p: the pointer to fetch and protect for later dereferencing 227 * @ssp: pointer to the srcu_struct, which is used to check that we 228 * really are in an SRCU read-side critical section. 229 * @c: condition to check for update-side use 230 * 231 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side 232 * critical section will result in an RCU-lockdep splat, unless @c evaluates 233 * to 1. The @c argument will normally be a logical expression containing 234 * lockdep_is_held() calls. 235 */ 236#define srcu_dereference_check(p, ssp, c) \ 237({ \ 238 __srcu_read_lock_must_hold(ssp); \ 239 __acquire_shared_ctx_lock(RCU); \ 240 __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ 241 (c) || srcu_read_lock_held(ssp), __rcu); \ 242 __release_shared_ctx_lock(RCU); \ 243 __v; \ 244}) 245 246/** 247 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing 248 * @p: the pointer to fetch and protect for later dereferencing 249 * @ssp: pointer to the srcu_struct, which is used to check that we 250 * really are in an SRCU read-side critical section. 251 * 252 * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU 253 * is enabled, invoking this outside of an RCU read-side critical 254 * section will result in an RCU-lockdep splat. 255 */ 256#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) 257 258/** 259 * srcu_dereference_notrace - no tracing and no lockdep calls from here 260 * @p: the pointer to fetch and protect for later dereferencing 261 * @ssp: pointer to the srcu_struct, which is used to check that we 262 * really are in an SRCU read-side critical section. 263 */ 264#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) 265 266/** 267 * srcu_read_lock - register a new reader for an SRCU-protected structure. 268 * @ssp: srcu_struct in which to register the new reader. 269 * 270 * Enter an SRCU read-side critical section. Note that SRCU read-side 271 * critical sections may be nested. However, it is illegal to 272 * call anything that waits on an SRCU grace period for the same 273 * srcu_struct, whether directly or indirectly. Please note that 274 * one way to indirectly wait on an SRCU grace period is to acquire 275 * a mutex that is held elsewhere while calling synchronize_srcu() or 276 * synchronize_srcu_expedited(). 277 * 278 * The return value from srcu_read_lock() is guaranteed to be 279 * non-negative. This value must be passed unaltered to the matching 280 * srcu_read_unlock(). Note that srcu_read_lock() and the matching 281 * srcu_read_unlock() must occur in the same context, for example, it is 282 * illegal to invoke srcu_read_unlock() in an irq handler if the matching 283 * srcu_read_lock() was invoked in process context. Or, for that matter to 284 * invoke srcu_read_unlock() from one task and the matching srcu_read_lock() 285 * from another. 286 */ 287static inline int srcu_read_lock(struct srcu_struct *ssp) 288 __acquires_shared(ssp) 289{ 290 int retval; 291 292 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 293 retval = __srcu_read_lock(ssp); 294 srcu_lock_acquire(&ssp->dep_map); 295 return retval; 296} 297 298/** 299 * srcu_read_lock_fast - register a new reader for an SRCU-protected structure. 300 * @ssp: srcu_struct in which to register the new reader. 301 * 302 * Enter an SRCU read-side critical section, but for a light-weight 303 * smp_mb()-free reader. See srcu_read_lock() for more information. This 304 * function is NMI-safe, in a manner similar to srcu_read_lock_nmisafe(). 305 * 306 * For srcu_read_lock_fast() to be used on an srcu_struct structure, 307 * that structure must have been defined using either DEFINE_SRCU_FAST() 308 * or DEFINE_STATIC_SRCU_FAST() on the one hand or initialized with 309 * init_srcu_struct_fast() on the other. Such an srcu_struct structure 310 * cannot be passed to any non-fast variant of srcu_read_{,un}lock() or 311 * srcu_{down,up}_read(). In kernels built with CONFIG_PROVE_RCU=y, 312 * __srcu_check_read_flavor() will complain bitterly if you ignore this 313 * restriction. 314 * 315 * Grace-period auto-expediting is disabled for SRCU-fast srcu_struct 316 * structures because SRCU-fast expedited grace periods invoke 317 * synchronize_rcu_expedited(), IPIs and all. If you need expedited 318 * SRCU-fast grace periods, use synchronize_srcu_expedited(). 319 * 320 * The srcu_read_lock_fast() function can be invoked only from those 321 * contexts where RCU is watching, that is, from contexts where it would 322 * be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain. 323 */ 324static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires_shared(ssp) 325 __acquires_shared(ssp) 326{ 327 struct srcu_ctr __percpu *retval; 328 329 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast()."); 330 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 331 retval = __srcu_read_lock_fast(ssp); 332 rcu_try_lock_acquire(&ssp->dep_map); 333 return retval; 334} 335 336/** 337 * srcu_read_lock_fast_updown - register a new reader for an SRCU-fast-updown structure. 338 * @ssp: srcu_struct in which to register the new reader. 339 * 340 * Enter an SRCU read-side critical section, but for a light-weight 341 * smp_mb()-free reader. See srcu_read_lock() for more information. 342 * This function is compatible with srcu_down_read_fast(), but is not 343 * NMI-safe. 344 * 345 * For srcu_read_lock_fast_updown() to be used on an srcu_struct 346 * structure, that structure must have been defined using either 347 * DEFINE_SRCU_FAST_UPDOWN() or DEFINE_STATIC_SRCU_FAST_UPDOWN() on the one 348 * hand or initialized with init_srcu_struct_fast_updown() on the other. 349 * Such an srcu_struct structure cannot be passed to any non-fast-updown 350 * variant of srcu_read_{,un}lock() or srcu_{down,up}_read(). In kernels 351 * built with CONFIG_PROVE_RCU=y, __srcu_check_read_flavor() will complain 352 * bitterly if you ignore this * restriction. 353 * 354 * Grace-period auto-expediting is disabled for SRCU-fast-updown 355 * srcu_struct structures because SRCU-fast-updown expedited grace periods 356 * invoke synchronize_rcu_expedited(), IPIs and all. If you need expedited 357 * SRCU-fast-updown grace periods, use synchronize_srcu_expedited(). 358 * 359 * The srcu_read_lock_fast_updown() function can be invoked only from 360 * those contexts where RCU is watching, that is, from contexts where 361 * it would be legal to invoke rcu_read_lock(). Otherwise, lockdep will 362 * complain. 363 */ 364static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp) 365 __acquires_shared(ssp) 366{ 367 struct srcu_ctr __percpu *retval; 368 369 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast_updown()."); 370 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); 371 retval = __srcu_read_lock_fast_updown(ssp); 372 rcu_try_lock_acquire(&ssp->dep_map); 373 return retval; 374} 375 376/* 377 * Used by tracing, cannot be traced and cannot call lockdep. 378 * See srcu_read_lock_fast() for more information. 379 */ 380static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp) 381 __acquires_shared(ssp) 382{ 383 struct srcu_ctr __percpu *retval; 384 385 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 386 retval = __srcu_read_lock_fast(ssp); 387 return retval; 388} 389 390/** 391 * srcu_down_read_fast - register a new reader for an SRCU-protected structure. 392 * @ssp: srcu_struct in which to register the new reader. 393 * 394 * Enter a semaphore-like SRCU read-side critical section, but for 395 * a light-weight smp_mb()-free reader. See srcu_read_lock_fast() and 396 * srcu_down_read() for more information. 397 * 398 * The same srcu_struct may be used concurrently by srcu_down_read_fast() 399 * and srcu_read_lock_fast(). However, the same definition/initialization 400 * requirements called out for srcu_read_lock_safe() apply. 401 */ 402static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp) 403{ 404 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); 405 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast()."); 406 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); 407 return __srcu_read_lock_fast_updown(ssp); 408} 409 410/** 411 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure. 412 * @ssp: srcu_struct in which to register the new reader. 413 * 414 * Enter an SRCU read-side critical section, but in an NMI-safe manner. 415 * See srcu_read_lock() for more information. 416 * 417 * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure, 418 * then none of the other flavors may be used, whether before, during, 419 * or after. 420 */ 421static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) 422 __acquires_shared(ssp) 423{ 424 int retval; 425 426 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); 427 retval = __srcu_read_lock_nmisafe(ssp); 428 rcu_try_lock_acquire(&ssp->dep_map); 429 return retval; 430} 431 432/* Used by tracing, cannot be traced and cannot invoke lockdep. */ 433static inline notrace int 434srcu_read_lock_notrace(struct srcu_struct *ssp) 435 __acquires_shared(ssp) 436{ 437 int retval; 438 439 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 440 retval = __srcu_read_lock(ssp); 441 return retval; 442} 443 444/** 445 * srcu_down_read - register a new reader for an SRCU-protected structure. 446 * @ssp: srcu_struct in which to register the new reader. 447 * 448 * Enter a semaphore-like SRCU read-side critical section. Note that 449 * SRCU read-side critical sections may be nested. However, it is 450 * illegal to call anything that waits on an SRCU grace period for the 451 * same srcu_struct, whether directly or indirectly. Please note that 452 * one way to indirectly wait on an SRCU grace period is to acquire 453 * a mutex that is held elsewhere while calling synchronize_srcu() or 454 * synchronize_srcu_expedited(). But if you want lockdep to help you 455 * keep this stuff straight, you should instead use srcu_read_lock(). 456 * 457 * The semaphore-like nature of srcu_down_read() means that the matching 458 * srcu_up_read() can be invoked from some other context, for example, 459 * from some other task or from an irq handler. However, neither 460 * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler. 461 * 462 * Calls to srcu_down_read() may be nested, similar to the manner in 463 * which calls to down_read() may be nested. The same srcu_struct may be 464 * used concurrently by srcu_down_read() and srcu_read_lock(). 465 */ 466static inline int srcu_down_read(struct srcu_struct *ssp) 467 __acquires_shared(ssp) 468{ 469 WARN_ON_ONCE(in_nmi()); 470 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 471 return __srcu_read_lock(ssp); 472} 473 474/** 475 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. 476 * @ssp: srcu_struct in which to unregister the old reader. 477 * @idx: return value from corresponding srcu_read_lock(). 478 * 479 * Exit an SRCU read-side critical section. 480 */ 481static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) 482 __releases_shared(ssp) 483{ 484 WARN_ON_ONCE(idx & ~0x1); 485 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 486 srcu_lock_release(&ssp->dep_map); 487 __srcu_read_unlock(ssp, idx); 488} 489 490/** 491 * srcu_read_unlock_fast - unregister a old reader from an SRCU-protected structure. 492 * @ssp: srcu_struct in which to unregister the old reader. 493 * @scp: return value from corresponding srcu_read_lock_fast(). 494 * 495 * Exit a light-weight SRCU read-side critical section. 496 */ 497static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 498 __releases_shared(ssp) 499{ 500 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 501 srcu_lock_release(&ssp->dep_map); 502 __srcu_read_unlock_fast(ssp, scp); 503 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast()."); 504} 505 506/** 507 * srcu_read_unlock_fast_updown - unregister a old reader from an SRCU-fast-updown structure. 508 * @ssp: srcu_struct in which to unregister the old reader. 509 * @scp: return value from corresponding srcu_read_lock_fast_updown(). 510 * 511 * Exit an SRCU-fast-updown read-side critical section. 512 */ 513static inline void 514srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp) 515{ 516 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); 517 srcu_lock_release(&ssp->dep_map); 518 __srcu_read_unlock_fast_updown(ssp, scp); 519 RCU_LOCKDEP_WARN(!rcu_is_watching(), 520 "RCU must be watching srcu_read_unlock_fast_updown()."); 521} 522 523/* 524 * Used by tracing, cannot be traced and cannot call lockdep. 525 * See srcu_read_unlock_fast() for more information. 526 */ 527static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp, 528 struct srcu_ctr __percpu *scp) __releases_shared(ssp) 529{ 530 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); 531 __srcu_read_unlock_fast(ssp, scp); 532} 533 534/** 535 * srcu_up_read_fast - unregister a old reader from an SRCU-protected structure. 536 * @ssp: srcu_struct in which to unregister the old reader. 537 * @scp: return value from corresponding srcu_read_lock_fast(). 538 * 539 * Exit an SRCU read-side critical section, but not necessarily from 540 * the same context as the maching srcu_down_read_fast(). 541 */ 542static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) 543 __releases_shared(ssp) 544{ 545 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); 546 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN); 547 __srcu_read_unlock_fast_updown(ssp, scp); 548 RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast_updown()."); 549} 550 551/** 552 * srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure. 553 * @ssp: srcu_struct in which to unregister the old reader. 554 * @idx: return value from corresponding srcu_read_lock_nmisafe(). 555 * 556 * Exit an SRCU read-side critical section, but in an NMI-safe manner. 557 */ 558static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) 559 __releases_shared(ssp) 560{ 561 WARN_ON_ONCE(idx & ~0x1); 562 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); 563 rcu_lock_release(&ssp->dep_map); 564 __srcu_read_unlock_nmisafe(ssp, idx); 565} 566 567/* Used by tracing, cannot be traced and cannot call lockdep. */ 568static inline notrace void 569srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp) 570{ 571 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 572 __srcu_read_unlock(ssp, idx); 573} 574 575/** 576 * srcu_up_read - unregister a old reader from an SRCU-protected structure. 577 * @ssp: srcu_struct in which to unregister the old reader. 578 * @idx: return value from corresponding srcu_read_lock(). 579 * 580 * Exit an SRCU read-side critical section, but not necessarily from 581 * the same context as the maching srcu_down_read(). 582 */ 583static inline void srcu_up_read(struct srcu_struct *ssp, int idx) 584 __releases_shared(ssp) 585{ 586 WARN_ON_ONCE(idx & ~0x1); 587 WARN_ON_ONCE(in_nmi()); 588 srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); 589 __srcu_read_unlock(ssp, idx); 590} 591 592/** 593 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock 594 * 595 * Converts the preceding srcu_read_unlock into a two-way memory barrier. 596 * 597 * Call this after srcu_read_unlock, to guarantee that all memory operations 598 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after 599 * the preceding srcu_read_unlock. 600 */ 601static inline void smp_mb__after_srcu_read_unlock(void) 602{ 603 /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */ 604} 605 606/** 607 * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock 608 * 609 * Converts the preceding srcu_read_lock into a two-way memory barrier. 610 * 611 * Call this after srcu_read_lock, to guarantee that all memory operations 612 * that occur after smp_mb__after_srcu_read_lock will appear to happen after 613 * the preceding srcu_read_lock. 614 */ 615static inline void smp_mb__after_srcu_read_lock(void) 616{ 617 /* __srcu_read_lock has smp_mb() internally so nothing to do here. */ 618} 619 620DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct, 621 _T->idx = srcu_read_lock(_T->lock), 622 srcu_read_unlock(_T->lock, _T->idx), 623 int idx) 624DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 625#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T) 626 627DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct, 628 _T->scp = srcu_read_lock_fast(_T->lock), 629 srcu_read_unlock_fast(_T->lock, _T->scp), 630 struct srcu_ctr __percpu *scp) 631DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 632#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T) 633 634DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct, 635 _T->scp = srcu_read_lock_fast_notrace(_T->lock), 636 srcu_read_unlock_fast_notrace(_T->lock, _T->scp), 637 struct srcu_ctr __percpu *scp) 638DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T)) 639#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T) 640 641#endif