Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

sched_ext: Pass held rq to SCX_CALL_OP() for dump_cpu/dump_task

scx_dump_state() walks CPUs with rq_lock_irqsave() held and invokes
ops.dump_cpu / ops.dump_task with a NULL locked_rq, leaving
scx_locked_rq_state NULL. If the BPF callback calls a kfunc that
decides whether to take the rq lock based on scx_locked_rq() - e.g.
scx_bpf_cpuperf_set(cpu) - it tries to acquire the already-held rq,
double-locking it.

Pass the held rq to SCX_CALL_OP(). Thread it into scx_dump_task() too.
The pre-loop ops.dump call runs before rq_lock_irqsave() is taken, so
it keeps rq=NULL.

Fixes: 07814a9439a3 ("sched_ext: Print debug dump after an error exit")
Cc: stable@vger.kernel.org # v6.12+
Reported-by: Chris Mason <clm@meta.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>

+6 -8
+6 -8
kernel/sched/ext.c
··· 6117 6117 scx_dump_data.cpu = -1; 6118 6118 } 6119 6119 6120 - static void scx_dump_task(struct scx_sched *sch, 6121 - struct seq_buf *s, struct scx_dump_ctx *dctx, 6122 - struct task_struct *p, char marker) 6120 + static void scx_dump_task(struct scx_sched *sch, struct seq_buf *s, struct scx_dump_ctx *dctx, 6121 + struct rq *rq, struct task_struct *p, char marker) 6123 6122 { 6124 6123 static unsigned long bt[SCX_EXIT_BT_LEN]; 6125 6124 struct scx_sched *task_sch = scx_task_sched(p); ··· 6159 6160 6160 6161 if (SCX_HAS_OP(sch, dump_task)) { 6161 6162 ops_dump_init(s, " "); 6162 - SCX_CALL_OP(sch, dump_task, NULL, dctx, p); 6163 + SCX_CALL_OP(sch, dump_task, rq, dctx, p); 6163 6164 ops_dump_exit(); 6164 6165 } 6165 6166 ··· 6283 6284 used = seq_buf_used(&ns); 6284 6285 if (SCX_HAS_OP(sch, dump_cpu)) { 6285 6286 ops_dump_init(&ns, " "); 6286 - SCX_CALL_OP(sch, dump_cpu, NULL, 6287 - &dctx, cpu, idle); 6287 + SCX_CALL_OP(sch, dump_cpu, rq, &dctx, cpu, idle); 6288 6288 ops_dump_exit(); 6289 6289 } 6290 6290 ··· 6306 6308 6307 6309 if (rq->curr->sched_class == &ext_sched_class && 6308 6310 (dump_all_tasks || scx_task_on_sched(sch, rq->curr))) 6309 - scx_dump_task(sch, &s, &dctx, rq->curr, '*'); 6311 + scx_dump_task(sch, &s, &dctx, rq, rq->curr, '*'); 6310 6312 6311 6313 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 6312 6314 if (dump_all_tasks || scx_task_on_sched(sch, p)) 6313 - scx_dump_task(sch, &s, &dctx, p, ' '); 6315 + scx_dump_task(sch, &s, &dctx, rq, p, ' '); 6314 6316 next: 6315 6317 rq_unlock_irqrestore(rq, &rf); 6316 6318 }