Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:

- Fix zero_vruntime tracking again (Peter Zijlstra)

- Fix avg_vruntime() usage in sched_debug (Peter Zijlstra)

* tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/debug: Fix avg_vruntime() usage
sched/fair: Fix zero_vruntime tracking fix

+6 -8 (commit total across both files)
+3 -1
kernel/sched/debug.c
···  902  902  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
     903  903  {
     904  904      s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
          905 +    u64 avruntime;
     905  906      struct sched_entity *last, *first, *root;
     906  907      struct rq *rq = cpu_rq(cpu);
     907  908      unsigned long flags;
···  926  925      if (last)
     927  926          right_vruntime = last->vruntime;
     928  927      zero_vruntime = cfs_rq->zero_vruntime;
          928 +    avruntime = avg_vruntime(cfs_rq);
     929  929      raw_spin_rq_unlock_irqrestore(rq, flags);
     930  930  
     931  931      SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
···  936  934      SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
     937  935              SPLIT_NS(zero_vruntime));
     938  936      SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
     939      -            SPLIT_NS(avg_vruntime(cfs_rq)));
          937 +            SPLIT_NS(avruntime));
     940  938      SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
     941  939              SPLIT_NS(right_vruntime));
     942  940      spread = right_vruntime - left_vruntime;
+3 -7
kernel/sched/fair.c
···  707  707   * Called in:
     708  708   *  - place_entity() -- before enqueue
     709  709   *  - update_entity_lag() -- before dequeue
     710      - *  - entity_tick()
          710 + *  - update_deadline() -- slice expiration
     711  711   *
     712  712   * This means it is one entry 'behind' but that puts it close enough to where
     713  713   * the bound on entity_key() is at most two lag bounds.
··· 1131 1131      * EEVDF: vd_i = ve_i + r_i / w_i
     1132 1132      */
     1133 1133     se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
          1134 +   avg_vruntime(cfs_rq);
     1134 1135 
     1135 1136     /*
     1136 1137      * The task has consumed its request, reschedule.
··· 5594 5593     update_load_avg(cfs_rq, curr, UPDATE_TG);
     5595 5594     update_cfs_group(curr);
     5596 5595 
     5597      -   /*
     5598      -    * Pulls along cfs_rq::zero_vruntime.
     5599      -    */
     5600      -   avg_vruntime(cfs_rq);
     5601      -
     5602 5596 #ifdef CONFIG_SCHED_HRTICK
     5603 5597     /*
     5604 5598      * queued ticks are scheduled to match the slice, so don't bother
··· 9124 9128      */
     9125 9129     if (entity_eligible(cfs_rq, se)) {
     9126 9130         se->vruntime = se->deadline;
     9127      -       se->deadline += calc_delta_fair(se->slice, se);
          9131 +       update_deadline(cfs_rq, se);
     9128 9132     }
     9129 9133 