Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'sched-urgent-2022-06-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
"A single scheduler fix plugging a race between sched_setscheduler()
and balance_push().

sched_setscheduler() spliced the balance callbacks across a lock
break which makes it possible for an interleaving schedule() to
observe an empty list"

* tag 'sched-urgent-2022-06-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix balance_push() vs __sched_setscheduler()

+38 -3
+33 -3
kernel/sched/core.c
··· 4798 4798 4799 4799 static void balance_push(struct rq *rq); 4800 4800 4801 + /* 4802 + * balance_push_callback is a right abuse of the callback interface and plays 4803 + * by significantly different rules. 4804 + * 4805 + * Where the normal balance_callback's purpose is to be ran in the same context 4806 + * that queued it (only later, when it's safe to drop rq->lock again), 4807 + * balance_push_callback is specifically targeted at __schedule(). 4808 + * 4809 + * This abuse is tolerated because it places all the unlikely/odd cases behind 4810 + * a single test, namely: rq->balance_callback == NULL. 4811 + */ 4801 4812 struct callback_head balance_push_callback = { 4802 4813 .next = NULL, 4803 4814 .func = (void (*)(struct callback_head *))balance_push, 4804 4815 }; 4805 4816 4806 - static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4817 + static inline struct callback_head * 4818 + __splice_balance_callbacks(struct rq *rq, bool split) 4807 4819 { 4808 4820 struct callback_head *head = rq->balance_callback; 4809 4821 4822 + if (likely(!head)) 4823 + return NULL; 4824 + 4810 4825 lockdep_assert_rq_held(rq); 4811 - if (head) 4826 + /* 4827 + * Must not take balance_push_callback off the list when 4828 + * splice_balance_callbacks() and balance_callbacks() are not 4829 + * in the same rq->lock section. 4830 + * 4831 + * In that case it would be possible for __schedule() to interleave 4832 + * and observe the list empty. 
4833 + */ 4834 + if (split && head == &balance_push_callback) 4835 + head = NULL; 4836 + else 4812 4837 rq->balance_callback = NULL; 4813 4838 4814 4839 return head; 4815 4840 } 4816 4841 4842 + static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4843 + { 4844 + return __splice_balance_callbacks(rq, true); 4845 + } 4846 + 4817 4847 static void __balance_callbacks(struct rq *rq) 4818 4848 { 4819 - do_balance_callbacks(rq, splice_balance_callbacks(rq)); 4849 + do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 4820 4850 } 4821 4851 4822 4852 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+5
kernel/sched/sched.h
··· 1693 1693 { 1694 1694 lockdep_assert_rq_held(rq); 1695 1695 1696 + /* 1697 + * Don't (re)queue an already queued item; nor queue anything when 1698 + * balance_push() is active, see the comment with 1699 + * balance_push_callback. 1700 + */ 1696 1701 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 1697 1702 return; 1698 1703