Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sched: Enable context analysis for core.c and fair.c

This demonstrates a larger conversion to use Clang's context
analysis. The benefit is additional static checking of locking rules,
along with better documentation.

Notably, kernel/sched contains sufficiently complex synchronization
patterns, and application to core.c & fair.c demonstrates that the
latest Clang version has become powerful enough to start applying this
to more complex subsystems (with some modest annotations and changes).

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-37-elver@google.com

Authored by Marco Elver and committed by Peter Zijlstra
04e49d92 dc36d55d

+177 -68
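
The annotations added below (__acquires(), __releases(), __must_hold(), __cond_acquires(), and friends) expand to Clang capability-analysis attributes when context analysis is enabled for a translation unit, letting the compiler check at build time that locks are acquired, released and held as declared. As a rough illustration of the kind of bug this catches, here is a minimal standalone sketch using plain Clang thread-safety attributes; my_lock, biglock, shared_counter and update_locked() are made-up names, not kernel code, and it builds with clang -Wthread-safety rather than the kernel's wrapper macros:

/*
 * sketch.c - illustrative only, not part of this patch.
 * Build: clang -Wthread-safety -c sketch.c
 */
struct __attribute__((capability("mutex"))) my_lock {
        int dummy;
};

extern struct my_lock biglock;

/* Lock and unlock functions declare their effect on the capability. */
void my_lock_acquire(struct my_lock *l) __attribute__((acquire_capability(*l)));
void my_lock_release(struct my_lock *l) __attribute__((release_capability(*l)));

/* Data that may only be touched while biglock is held. */
extern int shared_counter __attribute__((guarded_by(biglock)));

/* Caller must already hold biglock; compare the kernel's __must_hold(). */
static void update_locked(void) __attribute__((requires_capability(biglock)));
static void update_locked(void)
{
        shared_counter++;               /* OK: declared as requiring biglock */
}

void good_update(void)
{
        my_lock_acquire(&biglock);
        update_locked();
        my_lock_release(&biglock);      /* balanced: no warning */
}

void bad_update(void)
{
        update_locked();                /* warning: requires holding 'biglock' */
}

The kernel wrappers in this diff additionally deal with lock aliasing (__rq_lockp() may return either the per-CPU lock or the core-wide lock) and with scope-based guards, which is what the __returns_ctx_lock(), DECLARE_LOCK_GUARD_1_ATTRS() and WITH_LOCK_GUARD_2_ATTRS() pieces below are for.
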
include/linux/sched.h (+3 -3)
···
         _cond_resched(); \
 })
 
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
 
 #define MIGHT_RESCHED_RCU_SHIFT         8
 #define MIGHT_RESCHED_PREEMPT_MASK      ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

include/linux/sched/signal.h (+3 -1)
···
         (thread_group_leader(p) && !thread_group_empty(p))
 
 extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
-                                                unsigned long *flags);
+                                                unsigned long *flags)
+        __acquires(&task->sighand->siglock);
 
 static inline void unlock_task_sighand(struct task_struct *task,
                                        unsigned long *flags)
+        __releases(&task->sighand->siglock)
 {
         spin_unlock_irqrestore(&task->sighand->siglock, *flags);
 }

include/linux/sched/task.h (+5 -1)
···
  * write_lock_irq(&tasklist_lock), neither inside nor outside.
  */
 static inline void task_lock(struct task_struct *p)
+        __acquires(&p->alloc_lock)
 {
         spin_lock(&p->alloc_lock);
 }
 
 static inline void task_unlock(struct task_struct *p)
+        __releases(&p->alloc_lock)
 {
         spin_unlock(&p->alloc_lock);
 }
 
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
 
 #endif /* _LINUX_SCHED_TASK_H */

include/linux/sched/wake_q.h (+3)
···
 /* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
 static inline
 void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+        __releases(lock)
 {
         guard(preempt)();
         raw_spin_unlock(lock);
···
 
 static inline
 void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+        __releases(lock)
 {
         guard(preempt)();
         raw_spin_unlock_irq(lock);
···
 static inline
 void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
                                      struct wake_q_head *wake_q)
+        __releases(lock)
 {
         guard(preempt)();
         raw_spin_unlock_irqrestore(lock, flags);

kernel/sched/Makefile (+3)
···
 # SPDX-License-Identifier: GPL-2.0
 
+CONTEXT_ANALYSIS_core.o := y
+CONTEXT_ANALYSIS_fair.o := y
+
 # The compilers are complaining about unused variables inside an if(0) scope
 # block. This is daft, shut them up.
 ccflags-y += $(call cc-disable-warning, unused-but-set-variable)

kernel/sched/core.c (+62 -27)
···
 static struct cpumask sched_core_mask;
 
 static void sched_core_lock(int cpu, unsigned long *flags)
+        __context_unsafe(/* acquires multiple */)
+        __acquires(&runqueues.__lock) /* overapproximation */
 {
         const struct cpumask *smt_mask = cpu_smt_mask(cpu);
         int t, i = 0;
···
 }
 
 static void sched_core_unlock(int cpu, unsigned long *flags)
+        __context_unsafe(/* releases multiple */)
+        __releases(&runqueues.__lock) /* overapproximation */
 {
         const struct cpumask *smt_mask = cpu_smt_mask(cpu);
         int t;
···
  */
 
 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+        __context_unsafe()
 {
         raw_spinlock_t *lock;
 
···
 }
 
 bool raw_spin_rq_trylock(struct rq *rq)
+        __context_unsafe()
 {
         raw_spinlock_t *lock;
         bool ret;
···
         raw_spin_rq_lock(rq1);
         if (__rq_lockp(rq1) != __rq_lockp(rq2))
                 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+        else
+                __acquire_ctx_lock(__rq_lockp(rq2)); /* fake acquire */
 
         double_rq_clock_clear_update(rq1, rq2);
 }
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
+ * ___task_rq_lock - lock the rq @p resides on.
  */
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-        __acquires(rq->lock)
+struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 {
         struct rq *rq;
 
···
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-        __acquires(p->pi_lock)
-        __acquires(rq->lock)
+struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 {
         struct rq *rq;
 
···
  */
 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
                                    struct task_struct *p, int new_cpu)
+        __must_hold(__rq_lockp(rq))
 {
         lockdep_assert_rq_held(rq);
 
···
  */
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
                                  struct task_struct *p, int dest_cpu)
+        __must_hold(__rq_lockp(rq))
 {
         /* Affinity changed (again). */
         if (!is_cpu_allowed(p, dest_cpu))
···
          * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
          */
         flush_smp_call_function_queue();
+
+        /*
+         * We may change the underlying rq, but the locks held will
+         * appropriately be "transferred" when switching.
+         */
+        context_unsafe_alias(rq);
 
         raw_spin_lock(&p->pi_lock);
         rq_lock(rq, &rf);
···
 
         if (!lowest_rq)
                 goto out_unlock;
+
+        lockdep_assert_rq_held(lowest_rq);
 
         // XXX validate p is still the highest prio task
         if (task_rq(p) == rq) {
···
  */
 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
                             int dest_cpu, unsigned int flags)
-        __releases(rq->lock)
-        __releases(p->pi_lock)
+        __releases(__rq_lockp(rq), &p->pi_lock)
 {
         struct set_affinity_pending my_pending = { }, *pending = NULL;
         bool stop_pending, complete = false;
···
                                          struct affinity_context *ctx,
                                          struct rq *rq,
                                          struct rq_flags *rf)
-        __releases(rq->lock)
-        __releases(p->pi_lock)
+        __releases(__rq_lockp(rq), &p->pi_lock)
 {
         const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
         const struct cpumask *cpu_valid_mask = cpu_active_mask;
···
  */
 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
-        struct rq *rq = NULL;
         struct rq_flags rf;
         int ret;
 
         raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-        if (__task_needs_rq_lock(p))
-                rq = __task_rq_lock(p, &rf);
+        if (__task_needs_rq_lock(p)) {
+                struct rq *rq = __task_rq_lock(p, &rf);
 
-        /*
-         * At this point the task is pinned; either:
-         *  - blocked and we're holding off wakeups (pi->lock)
-         *  - woken, and we're holding off enqueue (rq->lock)
-         *  - queued, and we're holding off schedule (rq->lock)
-         *  - running, and we're holding off de-schedule (rq->lock)
-         *
-         * The called function (@func) can use: task_curr(), p->on_rq and
-         * p->__state to differentiate between these states.
-         */
-        ret = func(p, arg);
+                /*
+                 * At this point the task is pinned; either:
+                 *  - blocked and we're holding off wakeups (pi->lock)
+                 *  - woken, and we're holding off enqueue (rq->lock)
+                 *  - queued, and we're holding off schedule (rq->lock)
+                 *  - running, and we're holding off de-schedule (rq->lock)
+                 *
+                 * The called function (@func) can use: task_curr(), p->on_rq and
+                 * p->__state to differentiate between these states.
+                 */
+                ret = func(p, arg);
 
-        if (rq)
                 __task_rq_unlock(rq, p, &rf);
+        } else {
+                ret = func(p, arg);
+        }
 
         raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
         return ret;
···
 
 static inline void
 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
+        __releases(__rq_lockp(rq))
+        __acquires(__rq_lockp(this_rq()))
 {
         /*
          * Since the runqueue lock will be released by the next
···
         /* this is a valid case when another task releases the spinlock */
         rq_lockp(rq)->owner = next;
 #endif
+        /*
+         * Model the rq reference switcheroo.
+         */
+        __release(__rq_lockp(rq));
+        __acquire(__rq_lockp(this_rq()));
 }
 
 static inline void finish_lock_switch(struct rq *rq)
+        __releases(__rq_lockp(rq))
 {
         /*
          * If we are tracking spinlock dependencies then we have to
···
 static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
                     struct task_struct *next)
+        __must_hold(__rq_lockp(rq))
 {
         kcov_prepare_switch(prev);
         sched_info_switch(rq, prev, next);
···
  * because prev may have moved to another CPU.
  */
 static struct rq *finish_task_switch(struct task_struct *prev)
-        __releases(rq->lock)
+        __releases(__rq_lockp(this_rq()))
 {
         struct rq *rq = this_rq();
         struct mm_struct *mm = rq->prev_mm;
···
  * @prev: the thread we just switched away from.
  */
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
-        __releases(rq->lock)
+        __releases(__rq_lockp(this_rq()))
 {
         /*
          * New tasks start with FORK_PREEMPT_COUNT, see there and
···
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
                struct task_struct *next, struct rq_flags *rf)
+        __releases(__rq_lockp(rq))
 {
         prepare_task_switch(rq, prev, next);
 
···
  */
 static inline struct task_struct *
 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+        __must_hold(__rq_lockp(rq))
 {
         const struct sched_class *class;
         struct task_struct *p;
···
 
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+        __must_hold(__rq_lockp(rq))
 {
         struct task_struct *next, *p, *max;
         const struct cpumask *smt_mask;
···
 }
 
 static void sched_core_balance(struct rq *rq)
+        __must_hold(__rq_lockp(rq))
 {
         struct sched_domain *sd;
         int cpu = cpu_of(rq);
···
 
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+        __must_hold(__rq_lockp(rq))
 {
         return __pick_next_task(rq, prev, rf);
 }
···
         int cpu;
 
         scoped_guard (raw_spinlock_irq, &p->pi_lock) {
+                /*
+                 * We may change the underlying rq, but the locks held will
+                 * appropriately be "transferred" when switching.
+                 */
+                context_unsafe_alias(rq);
+
                 cpu = select_fallback_rq(rq->cpu, p);
 
                 rq_lock(rq, &rf);
···
  * effective when the hotplug motion is down.
  */
 static void balance_push(struct rq *rq)
+        __must_hold(__rq_lockp(rq))
 {
         struct task_struct *push_task = rq->curr;
 

kernel/sched/fair.c (+6 -1)
···
 }
 
 static void task_numa_placement(struct task_struct *p)
+        __context_unsafe(/* conditional locking */)
 {
         int seq, nid, max_nid = NUMA_NO_NODE;
         unsigned long max_faults = 0;
···
         return cfs_rq->avg.load_avg;
 }
 
-static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
+static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+        __must_hold(__rq_lockp(this_rq));
 
 static inline unsigned long task_util(struct task_struct *p)
 {
···
  * used to track this state.
  */
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
+        __must_hold(&cfs_b->lock)
 {
         int throttled;
 
···
 
 struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+        __must_hold(__rq_lockp(rq))
 {
         struct sched_entity *se;
         struct task_struct *p;
···
  *   > 0 - success, new (fair) tasks present
  */
 static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+        __must_hold(__rq_lockp(this_rq))
 {
         unsigned long next_balance = jiffies + HZ;
         int this_cpu = this_rq->cpu;

kernel/sched/sched.h (+91 -35)
···
         return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
 }
 
+static __always_inline struct rq *__this_rq(void)
+{
+        return this_cpu_ptr(&runqueues);
+}
+
 #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
-#define this_rq()               this_cpu_ptr(&runqueues)
+#define this_rq()               __this_rq()
 #define task_rq(p)              cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 #define raw_rq()                raw_cpu_ptr(&runqueues)
···
 }
 
 static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+        __returns_ctx_lock(rq_lockp(rq)) /* alias them */
 {
         if (rq->core_enabled)
                 return &rq->core->__lock;
···
 }
 
 static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+        __returns_ctx_lock(rq_lockp(rq)) /* alias them */
 {
         return &rq->__lock;
 }
···
 #endif /* !CONFIG_RT_GROUP_SCHED */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
+        __assumes_ctx_lock(__rq_lockp(rq))
 {
         lockdep_assert_held(__rq_lockp(rq));
 }
 
-extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-extern bool raw_spin_rq_trylock(struct rq *rq);
-extern void raw_spin_rq_unlock(struct rq *rq);
+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+        __acquires(__rq_lockp(rq));
+
+extern bool raw_spin_rq_trylock(struct rq *rq)
+        __cond_acquires(true, __rq_lockp(rq));
+
+extern void raw_spin_rq_unlock(struct rq *rq)
+        __releases(__rq_lockp(rq));
 
 static inline void raw_spin_rq_lock(struct rq *rq)
+        __acquires(__rq_lockp(rq))
 {
         raw_spin_rq_lock_nested(rq, 0);
 }
 
 static inline void raw_spin_rq_lock_irq(struct rq *rq)
+        __acquires(__rq_lockp(rq))
 {
         local_irq_disable();
         raw_spin_rq_lock(rq);
 }
 
 static inline void raw_spin_rq_unlock_irq(struct rq *rq)
+        __releases(__rq_lockp(rq))
 {
         raw_spin_rq_unlock(rq);
         local_irq_enable();
 }
 
 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
+        __acquires(__rq_lockp(rq))
 {
         unsigned long flags;
 
···
 }
 
 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
+        __releases(__rq_lockp(rq))
 {
         raw_spin_rq_unlock(rq);
         local_irq_restore(flags);
···
         rq->clock_update_flags |= rf->clock_update_flags;
 }
 
-extern
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-        __acquires(rq->lock);
+#define __task_rq_lock(...)     __acquire_ret(___task_rq_lock(__VA_ARGS__), __rq_lockp(__ret))
+extern struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires_ret;
 
-extern
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-        __acquires(p->pi_lock)
-        __acquires(rq->lock);
+#define task_rq_lock(...)       __acquire_ret(_task_rq_lock(__VA_ARGS__), __rq_lockp(__ret))
+extern struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+        __acquires(&p->pi_lock) __acquires_ret;
 
 static inline void
 __task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-        __releases(rq->lock)
+        __releases(__rq_lockp(rq))
 {
         rq_unpin_lock(rq, rf);
         raw_spin_rq_unlock(rq);
···
 
 static inline void
 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-        __releases(rq->lock)
-        __releases(p->pi_lock)
+        __releases(__rq_lockp(rq), &p->pi_lock)
 {
         __task_rq_unlock(rq, p, rf);
         raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
···
                     _T->rq = task_rq_lock(_T->lock, &_T->rf),
                     task_rq_unlock(_T->rq, _T->lock, &_T->rf),
                     struct rq *rq; struct rq_flags rf)
+DECLARE_LOCK_GUARD_1_ATTRS(task_rq_lock, __acquires(_T->pi_lock), __releases((*(struct task_struct **)_T)->pi_lock))
+#define class_task_rq_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_rq_lock, _T)
 
 DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
                     _T->rq = __task_rq_lock(_T->lock, &_T->rf),
···
                     struct rq *rq; struct rq_flags rf)
 
 static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-        __acquires(rq->lock)
+        __acquires(__rq_lockp(rq))
 {
         raw_spin_rq_lock_irqsave(rq, rf->flags);
         rq_pin_lock(rq, rf);
 }
 
 static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-        __acquires(rq->lock)
+        __acquires(__rq_lockp(rq))
 {
         raw_spin_rq_lock_irq(rq);
         rq_pin_lock(rq, rf);
 }
 
 static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
-        __acquires(rq->lock)
+        __acquires(__rq_lockp(rq))
 {
         raw_spin_rq_lock(rq);
         rq_pin_lock(rq, rf);
 }
 
 static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-        __releases(rq->lock)
+        __releases(__rq_lockp(rq))
 {
         rq_unpin_lock(rq, rf);
         raw_spin_rq_unlock_irqrestore(rq, rf->flags);
 }
 
 static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-        __releases(rq->lock)
+        __releases(__rq_lockp(rq))
 {
         rq_unpin_lock(rq, rf);
         raw_spin_rq_unlock_irq(rq);
 }
 
 static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
-        __releases(rq->lock)
+        __releases(__rq_lockp(rq))
 {
         rq_unpin_lock(rq, rf);
         raw_spin_rq_unlock(rq);
···
                     rq_unlock(_T->lock, &_T->rf),
                     struct rq_flags rf)
 
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock, _T)
+
 DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
                     rq_lock_irq(_T->lock, &_T->rf),
                     rq_unlock_irq(_T->lock, &_T->rf),
                     struct rq_flags rf)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irq, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock_irq, _T)
 
 DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
                     rq_lock_irqsave(_T->lock, &_T->rf),
                     rq_unlock_irqrestore(_T->lock, &_T->rf),
                     struct rq_flags rf)
 
-static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
-        __acquires(rq->lock)
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irqsave, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock_irqsave, _T)
+
+#define this_rq_lock_irq(...)   __acquire_ret(_this_rq_lock_irq(__VA_ARGS__), __rq_lockp(__ret))
+static inline struct rq *_this_rq_lock_irq(struct rq_flags *rf) __acquires_ret
 {
         struct rq *rq;
 
···
 #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...)            \
 __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__)    \
 static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
+        __no_context_analysis                                           \
 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t;    \
   _lock; return _t; }
+#define DECLARE_LOCK_GUARD_2_ATTRS(_name, _lock, _unlock1, _unlock2)    \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T1, \
+                                                             lock_##_name##_t *_T2) _lock; \
+static __always_inline void __class_##_name##_cleanup_ctx1(class_##_name##_t **_T1) \
+        __no_context_analysis _unlock1 { }                              \
+static __always_inline void __class_##_name##_cleanup_ctx2(class_##_name##_t **_T2) \
+        __no_context_analysis _unlock2 { }
+#define WITH_LOCK_GUARD_2_ATTRS(_name, _T1, _T2)                        \
+        class_##_name##_constructor(_T1, _T2),                          \
+        *__UNIQUE_ID(unlock1) __cleanup(__class_##_name##_cleanup_ctx1) = (void *)(_T1),\
+        *__UNIQUE_ID(unlock2) __cleanup(__class_##_name##_cleanup_ctx2) = (void *)(_T2)
 
 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
 {
···
         return rq1->cpu < rq2->cpu;
 }
 
-extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2)
+        __acquires(__rq_lockp(rq1), __rq_lockp(rq2));
 
 #ifdef CONFIG_PREEMPTION
 
···
  * also adds more overhead and therefore may reduce throughput.
  */
 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
-        __releases(this_rq->lock)
-        __acquires(busiest->lock)
-        __acquires(this_rq->lock)
+        __must_hold(__rq_lockp(this_rq))
+        __acquires(__rq_lockp(busiest))
 {
         raw_spin_rq_unlock(this_rq);
         double_rq_lock(this_rq, busiest);
···
  * regardless of entry order into the function.
  */
 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
-        __releases(this_rq->lock)
-        __acquires(busiest->lock)
-        __acquires(this_rq->lock)
+        __must_hold(__rq_lockp(this_rq))
+        __acquires(__rq_lockp(busiest))
 {
-        if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
-            likely(raw_spin_rq_trylock(busiest))) {
+        if (__rq_lockp(this_rq) == __rq_lockp(busiest)) {
+                __acquire(__rq_lockp(busiest)); /* already held */
+                double_rq_clock_clear_update(this_rq, busiest);
+                return 0;
+        }
+
+        if (likely(raw_spin_rq_trylock(busiest))) {
                 double_rq_clock_clear_update(this_rq, busiest);
                 return 0;
         }
···
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  */
 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+        __must_hold(__rq_lockp(this_rq))
+        __acquires(__rq_lockp(busiest))
 {
         lockdep_assert_irqs_disabled();
 
···
 }
 
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-        __releases(busiest->lock)
+        __releases(__rq_lockp(busiest))
 {
         if (__rq_lockp(this_rq) != __rq_lockp(busiest))
                 raw_spin_rq_unlock(busiest);
+        else
+                __release(__rq_lockp(busiest)); /* fake release */
         lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
 }
 
 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
+        __acquires(l1, l2)
 {
         if (l1 > l2)
                 swap(l1, l2);
···
 }
 
 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+        __acquires(l1, l2)
 {
         if (l1 > l2)
                 swap(l1, l2);
···
 }
 
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+        __acquires(l1, l2)
 {
         if (l1 > l2)
                 swap(l1, l2);
···
 }
 
 static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+        __releases(l1, l2)
 {
         raw_spin_unlock(l1);
         raw_spin_unlock(l2);
···
                     double_raw_lock(_T->lock, _T->lock2),
                     double_raw_unlock(_T->lock, _T->lock2))
 
+DECLARE_LOCK_GUARD_2_ATTRS(double_raw_spinlock,
+                           __acquires(_T1, _T2),
+                           __releases(*(raw_spinlock_t **)_T1),
+                           __releases(*(raw_spinlock_t **)_T2));
+#define class_double_raw_spinlock_constructor(_T1, _T2) \
+        WITH_LOCK_GUARD_2_ATTRS(double_raw_spinlock, _T1, _T2)
+
 /*
  * double_rq_unlock - safely unlock two runqueues
  *
···
  * you need to do so manually after calling.
  */
 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-        __releases(rq1->lock)
-        __releases(rq2->lock)
+        __releases(__rq_lockp(rq1), __rq_lockp(rq2))
 {
         if (__rq_lockp(rq1) != __rq_lockp(rq2))
                 raw_spin_rq_unlock(rq2);
         else
-                __release(rq2->lock);
+                __release(__rq_lockp(rq2)); /* fake release */
         raw_spin_rq_unlock(rq1);
 }
 
scripts/context-analysis-suppression.txt (+1)
···
 src:*include/linux/rhashtable.h=emit
 src:*include/linux/rwlock*.h=emit
 src:*include/linux/rwsem.h=emit
+src:*include/linux/sched*=emit
 src:*include/linux/seqlock*.h=emit
 src:*include/linux/spinlock*.h=emit
 src:*include/linux/srcu*.h=emit