Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'sched_ext-for-7.0-rc6-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:
"These are late but both fix subtle yet critical problems and the blast
radius is limited strictly to sched_ext.

- Fix stale direct dispatch state in ddsp_dsq_id which can cause
spurious warnings in mark_direct_dispatch() on task wakeup

- Fix is_bpf_migration_disabled() false negative on non-PREEMPT_RCU
configs which can lead to incorrectly dispatching migration-
disabled tasks to remote CPUs"

* tag 'sched_ext-for-7.0-rc6-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
sched_ext: Fix is_bpf_migration_disabled() false negative on non-PREEMPT_RCU
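
For context on the first fix: ddsp_dsq_id records a direct dispatch verdict in the task itself, and mark_direct_dispatch() warns if a previous verdict was never consumed or cleared. A minimal userspace sketch of that bug class follows; the struct, the DSQ_INVALID constant, and the `fixed` switch are all hypothetical stand-ins, not kernel code.

/*
 * Sketch of per-task scratch state that must be invalidated on every
 * path that leaves the scheduler, or the next wakeup trips on leftovers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DSQ_INVALID UINT64_MAX

struct task {
	uint64_t ddsp_dsq_id;	/* dispatch verdict, valid only in-flight */
	uint64_t ddsp_enq_flags;
};

static void clear_direct_dispatch(struct task *p)
{
	p->ddsp_dsq_id = DSQ_INVALID;
	p->ddsp_enq_flags = 0;
}

static void mark_direct_dispatch(struct task *p, uint64_t dsq_id)
{
	/* The kernel warns here if a stale verdict is still present. */
	assert(p->ddsp_dsq_id == DSQ_INVALID);
	p->ddsp_dsq_id = dsq_id;
}

static void dequeue_cancelling_dispatch(struct task *p, int fixed)
{
	/* Pre-fix, a cancellation path could forget to clear the state. */
	if (fixed)
		clear_direct_dispatch(p);
}

int main(void)
{
	struct task p = { .ddsp_dsq_id = DSQ_INVALID };

	mark_direct_dispatch(&p, 42);		/* verdict recorded...	      */
	dequeue_cancelling_dispatch(&p, 1);	/* ...then cancelled; pass 0  */
	mark_direct_dispatch(&p, 7);		/* here to trip the assert    */
	puts("no stale state");
	return 0;
}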

2 files changed, +54 -26
kernel/sched/ext.c  +35 -14
@@ -1110,15 +1110,6 @@
 	p->scx.dsq = dsq;
 
 	/*
-	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
-	 * direct dispatch path, but we clear them here because the direct
-	 * dispatch verdict may be overridden on the enqueue path during e.g.
-	 * bypass.
-	 */
-	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
-	p->scx.ddsp_enq_flags = 0;
-
-	/*
 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
 	 * match waiters' load_acquire.
 	 */
@@ -1274,12 +1283,34 @@
 	p->scx.ddsp_enq_flags = enq_flags;
 }
 
+/*
+ * Clear @p direct dispatch state when leaving the scheduler.
+ *
+ * Direct dispatch state must be cleared in the following cases:
+ * - direct_dispatch(): cleared on the synchronous enqueue path, deferred
+ *   dispatch keeps the state until consumed
+ * - process_ddsp_deferred_locals(): cleared after consuming deferred state,
+ * - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch
+ *   verdict is ignored (local/global/bypass)
+ * - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred
+ *   cancellation and holding_cpu races
+ * - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by
+ *   the scx_bypass() loop, so that stale state is not reused by a subsequent
+ *   scheduler instance
+ */
+static inline void clear_direct_dispatch(struct task_struct *p)
+{
+	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
+	p->scx.ddsp_enq_flags = 0;
+}
+
 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 			    u64 enq_flags)
 {
 	struct rq *rq = task_rq(p);
 	struct scx_dispatch_q *dsq =
 		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+	u64 ddsp_enq_flags;
 
 	touch_core_sched_dispatch(rq, p);
@@ -1342,8 +1329,10 @@
 		return;
 	}
 
-	dispatch_enqueue(sch, dsq, p,
-			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
+	ddsp_enq_flags = p->scx.ddsp_enq_flags;
+	clear_direct_dispatch(p);
+
+	dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 static bool scx_rq_online(struct rq *rq)
@@ -1454,6 +1439,7 @@
 	 */
 	touch_core_sched(rq, p);
 	refill_task_slice_dfl(sch, p);
+	clear_direct_dispatch(p);
 	dispatch_enqueue(sch, dsq, p, enq_flags);
 }
 
@@ -1626,6 +1610,7 @@
 	sub_nr_running(rq, 1);
 
 	dispatch_dequeue(rq, p);
+	clear_direct_dispatch(p);
 	return true;
 }
 
@@ -2310,12 +2293,14 @@
 				struct task_struct, scx.dsq_list.node))) {
 		struct scx_sched *sch = scx_root;
 		struct scx_dispatch_q *dsq;
+		u64 dsq_id = p->scx.ddsp_dsq_id;
+		u64 enq_flags = p->scx.ddsp_enq_flags;
 
 		list_del_init(&p->scx.dsq_list.node);
+		clear_direct_dispatch(p);
 
-		dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+		dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
-			dispatch_to_local_dsq(sch, rq, dsq, p,
-					      p->scx.ddsp_enq_flags);
+			dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
 	}
 }
@@ -3033,6 +3014,8 @@
 
 	lockdep_assert_rq_held(rq);
 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
+
+	clear_direct_dispatch(p);
 
 	if (SCX_HAS_OP(sch, disable))
 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
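
A detail worth noting in the direct_dispatch() hunk above: p->scx.ddsp_enq_flags is copied into a local before clear_direct_dispatch(), so the flags survive the clearing and nothing reads the task's scratch fields after dispatch_enqueue() may have published the task to other CPUs. A standalone sketch of that snapshot-then-clear ordering, with purely illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative task with a scratch field that is only valid in-flight. */
struct task {
	uint64_t ddsp_enq_flags;
};

static void clear_direct_dispatch(struct task *p)
{
	p->ddsp_enq_flags = 0;	/* invalidate before the task is published */
}

static void dispatch_enqueue(struct task *p, uint64_t flags)
{
	/* Past this point another owner may consume @p and reuse its fields. */
	printf("enqueue flags=%#llx\n", (unsigned long long)flags);
}

int main(void)
{
	struct task p = { .ddsp_enq_flags = 0x4 };
	uint64_t flags = p.ddsp_enq_flags;	/* 1. snapshot what is needed */

	clear_direct_dispatch(&p);		/* 2. clear scratch state     */
	dispatch_enqueue(&p, flags);		/* 3. publish via the snapshot */
	return 0;
}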
kernel/sched/ext_idle.c  +19 -12
@@ -860,25 +860,32 @@
  * code.
  *
  * We can't simply check whether @p->migration_disabled is set in a
- * sched_ext callback, because migration is always disabled for the current
- * task while running BPF code.
+ * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
+ * migration for the current task while running BPF code.
  *
- * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively
- * disable and re-enable migration. For this reason, the current task
- * inside a sched_ext callback is always a migration-disabled task.
+ * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
+ * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
+ * the current task is ambiguous only in that case: it could be from the BPF
+ * prolog rather than a real migrate_disable() call.
  *
- * Therefore, when @p->migration_disabled == 1, check whether @p is the
- * current task or not: if it is, then migration was not disabled before
- * entering the callback, otherwise migration was disabled.
+ * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
+ * so migration_disabled == 1 always means the task is truly
+ * migration-disabled.
+ *
+ * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
+ * check whether @p is the current task or not: if it is, then migration was
+ * not disabled before entering the callback, otherwise migration was disabled.
  *
  * Returns true if @p is migration-disabled, false otherwise.
  */
 static bool is_bpf_migration_disabled(const struct task_struct *p)
 {
-	if (p->migration_disabled == 1)
-		return p != current;
-	else
-		return p->migration_disabled;
+	if (p->migration_disabled == 1) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+			return p != current;
+		return true;
+	}
+	return p->migration_disabled;
 }
 
 static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
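
The ext_idle.c fix relies on IS_ENABLED() being a compile-time constant, so the unused branch folds away on either config. A userspace sketch of the fixed logic follows (assumed harness, not kernel code; compile with or without -DCONFIG_PREEMPT_RCU to model the two configurations). It shows the false negative the patch closes: without PREEMPT_RCU the BPF prolog never bumps migration_disabled, so a count of 1 on the current task is a genuine migrate_disable() and must report true.

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_PREEMPT_RCU
#define PREEMPT_RCU_ENABLED 1
#else
#define PREEMPT_RCU_ENABLED 0
#endif

/* Illustrative stand-in for the task_struct fields involved. */
struct task {
	int migration_disabled;
	bool is_current;	/* models "p == current" inside a callback */
};

static bool is_bpf_migration_disabled(const struct task *p)
{
	if (p->migration_disabled == 1) {
		/*
		 * Only with PREEMPT_RCU can the count of 1 come from the
		 * BPF prolog; elsewhere it is a real migrate_disable().
		 */
		if (PREEMPT_RCU_ENABLED)
			return !p->is_current;
		return true;
	}
	return p->migration_disabled;
}

int main(void)
{
	/* Current task, one genuine migrate_disable(), !PREEMPT_RCU build. */
	struct task p = { .migration_disabled = 1, .is_current = true };

	/* The pre-fix check returned false here on !PREEMPT_RCU configs. */
	printf("migration disabled: %s\n",
	       is_bpf_migration_disabled(&p) ? "yes" : "no");
	return 0;
}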