Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'sched-urgent-2021-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
"Three fixes:

- Fix load tracking bug/inconsistency

- Fix a sporadic CFS bandwidth constraints enforcement bug

- Fix a uclamp utilization tracking bug for newly woken tasks"

* tag 'sched-urgent-2021-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/uclamp: Ignore max aggregation if rq is idle
sched/fair: Fix CFS bandwidth hrtimer expiry type
sched/fair: Sync load_sum with load_avg after dequeue
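
For context on the load-tracking fix (the "Sync load_sum with load_avg" patch above): cfs_rq->avg.load_avg is derived from load_sum by an integer division, so subtracting a dequeued entity's contribution from the two fields independently lets rounding residue accumulate and the pair drift out of sync. The fix below re-derives load_sum from load_avg on dequeue. A toy standalone illustration of the drift (plain C; the divider and entity numbers are invented, and se_weight()/PELT decay are ignored):

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of the dequeue_load_avg() inconsistency. Two entities are
 * queued, each with avg = floor(sum / divider), so the rq-level sum
 * carries rounding residue that the rq-level avg has already lost.
 */
int main(void)
{
	const uint64_t divider = 47742;       /* stand-in for get_pelt_divider() */
	uint64_t rq_load_sum = 47800 + 47800; /* raw sums of two queued entities */
	uint64_t rq_load_avg = 1 + 1;         /* floor(47800 / 47742) == 1 each */

	/* Old scheme: subtract the dequeued entity's sum and avg separately. */
	rq_load_avg -= 1;
	rq_load_sum -= 47800;
	printf("independent: avg*divider=%llu sum=%llu (out of sync)\n",
	       (unsigned long long)(rq_load_avg * divider),
	       (unsigned long long)rq_load_sum);

	/* Fixed scheme: re-derive sum from avg so the pair cannot drift. */
	rq_load_sum = rq_load_avg * divider;
	printf("resynced:    avg*divider=%llu sum=%llu\n",
	       (unsigned long long)(rq_load_avg * divider),
	       (unsigned long long)rq_load_sum);
	return 0;
}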

2 files changed: +18 -10

kernel/sched/fair.c (+4 -3)

@@ -3037,8 +3037,9 @@
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	u32 divider = get_pelt_divider(&se->avg);
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 #else
 static inline void
@@ -5082,7 +5081,7 @@
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
 {
 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
-	u64 remaining;
+	s64 remaining;
 
 	/* if the call-back is running a quota refresh is already occurring */
 	if (hrtimer_callback_running(refresh_timer))
@@ -5090,7 +5089,7 @@
 
 	/* is a quota refresh about to occur? */
 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
-	if (remaining < min_expire)
+	if (remaining < (s64)min_expire)
 		return 1;
 
 	return 0;
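
The second and third hunks above are a signed/unsigned comparison fix: hrtimer_expires_remaining() legitimately goes negative once the period timer has already expired, and storing that in a u64 wraps a small negative remainder into a huge positive value, so the remaining < min_expire test could never fire. A standalone sketch of the pitfall (plain C; the nanosecond values are invented, and the u64/s64 aliases only mimic the kernel's typedefs):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64; /* demo-only stand-ins for the kernel typedefs */
typedef int64_t s64;

int main(void)
{
	s64 expires_remaining = -5000; /* timer expired 5us ago (invented) */
	u64 min_expire = 2000000;      /* 2ms bound (invented) */

	/* Buggy flow: -5000 stored in a u64 becomes 18446744073709546616,
	 * so the "refresh is about to occur" test never succeeds. */
	u64 remaining_u64 = (u64)expires_remaining;
	printf("u64 compare fires: %d\n", remaining_u64 < min_expire);      /* 0 */

	/* Fixed flow: keep the remainder signed, cast the bound instead. */
	s64 remaining_s64 = expires_remaining;
	printf("s64 compare fires: %d\n", remaining_s64 < (s64)min_expire); /* 1 */
	return 0;
}
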
kernel/sched/sched.h (+14 -7)

@@ -2818,20 +2818,27 @@
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 				  struct task_struct *p)
 {
-	unsigned long min_util;
-	unsigned long max_util;
+	unsigned long min_util = 0;
+	unsigned long max_util = 0;
 
 	if (!static_branch_likely(&sched_uclamp_used))
 		return util;
 
-	min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
-	max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
-
 	if (p) {
-		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
-		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+		min_util = uclamp_eff_value(p, UCLAMP_MIN);
+		max_util = uclamp_eff_value(p, UCLAMP_MAX);
+
+		/*
+		 * Ignore last runnable task's max clamp, as this task will
+		 * reset it. Similarly, no need to read the rq's min clamp.
+		 */
+		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+			goto out;
 	}
 
+	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
 	 * RUNNABLE tasks with _different_ clamps, we can end up with an
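
The sched.h change targets uclamp's MAX aggregation: the rq-level clamp buckets keep the value of the last runnable task until the next enqueue resets them, so a task woken onto an idle rq could see its own max clamp inflated by that stale value. The fixed flow reads the task's effective clamps first and skips the rq values entirely when UCLAMP_FLAG_IDLE is set. A rough standalone model of the new control flow (plain C; the structs, flag value, and numbers are illustrative, not the kernel's):

#include <stdio.h>

#define UCLAMP_FLAG_IDLE 0x1 /* illustrative flag, mirroring the kernel name */

struct rq_clamps   { unsigned long min, max; unsigned int flags; };
struct task_clamps { unsigned long min, max; };

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/*
 * Mirrors the fixed uclamp_rq_util_with() flow: on an idle rq the waking
 * task's own clamps win; otherwise MAX-aggregate them with the rq's.
 */
static void effective_clamps(const struct rq_clamps *rq,
			     const struct task_clamps *p,
			     unsigned long *min_util, unsigned long *max_util)
{
	*min_util = p->min;
	*max_util = p->max;

	/* Stale last-task clamps on an idle rq are ignored. */
	if (rq->flags & UCLAMP_FLAG_IDLE)
		return;

	*min_util = max_ul(*min_util, rq->min);
	*max_util = max_ul(*max_util, rq->max);
}

int main(void)
{
	/* rq went idle while still holding the previous task's large max clamp. */
	struct rq_clamps rq = { .min = 0, .max = 1024, .flags = UCLAMP_FLAG_IDLE };
	struct task_clamps p = { .min = 0, .max = 200 };
	unsigned long lo, hi;

	effective_clamps(&rq, &p, &lo, &hi);
	printf("min=%lu max=%lu\n", lo, hi); /* max=200, not the stale 1024 */
	return 0;
}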