Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'sched_ext-for-6.15-rc0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

- Calling scx_bpf_create_dsq() with the same ID would succeed creating
duplicate DSQs. Fix it to return -EEXIST.

- scx_select_cpu_dfl() fixes and cleanups.

- Synchronize tools/sched_ext with external scheduler repo. While this
isn't a fix, there's no risk to the kernel and it's better if they
stay synced closer.

* tag 'sched_ext-for-6.15-rc0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
tools/sched_ext: Sync with scx repo
sched_ext: initialize built-in idle state before ops.init()
sched_ext: create_dsq: Return -EEXIST on duplicate request
sched_ext: Remove a meaningless conditional goto in scx_select_cpu_dfl()
sched_ext: idle: Fix return code of scx_select_cpu_dfl()

+103 -40
+4 -4
kernel/sched/ext.c
··· 4171 4171 4172 4172 init_dsq(dsq, dsq_id); 4173 4173 4174 - ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node, 4175 - dsq_hash_params); 4174 + ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node, 4175 + dsq_hash_params); 4176 4176 if (ret) { 4177 4177 kfree(dsq); 4178 4178 return ERR_PTR(ret); ··· 5361 5361 */ 5362 5362 cpus_read_lock(); 5363 5363 5364 + scx_idle_enable(ops); 5365 + 5364 5366 if (scx_ops.init) { 5365 5367 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init); 5366 5368 if (ret) { ··· 5428 5426 static_branch_enable(&scx_ops_enq_migration_disabled); 5429 5427 if (scx_ops.cpu_acquire || scx_ops.cpu_release) 5430 5428 static_branch_enable(&scx_ops_cpu_preempt); 5431 - 5432 - scx_idle_enable(ops); 5433 5429 5434 5430 /* 5435 5431 * Lock out forks, cgroup on/offlining and moves before opening the
+5 -7
kernel/sched/ext_idle.c
··· 544 544 * core. 545 545 */ 546 546 if (flags & SCX_PICK_IDLE_CORE) { 547 - cpu = prev_cpu; 547 + cpu = -EBUSY; 548 548 goto out_unlock; 549 549 } 550 550 } ··· 584 584 * increasing distance. 585 585 */ 586 586 cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags); 587 - if (cpu >= 0) 588 - goto out_unlock; 589 587 590 588 out_unlock: 591 589 rcu_read_unlock(); ··· 721 723 void scx_idle_enable(struct sched_ext_ops *ops) 722 724 { 723 725 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) 724 - static_branch_enable(&scx_builtin_idle_enabled); 726 + static_branch_enable_cpuslocked(&scx_builtin_idle_enabled); 725 727 else 726 - static_branch_disable(&scx_builtin_idle_enabled); 728 + static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); 727 729 728 730 if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) 729 - static_branch_enable(&scx_builtin_idle_per_node); 731 + static_branch_enable_cpuslocked(&scx_builtin_idle_per_node); 730 732 else 731 - static_branch_disable(&scx_builtin_idle_per_node); 733 + static_branch_disable_cpuslocked(&scx_builtin_idle_per_node); 732 734 733 735 #ifdef CONFIG_SMP 734 736 reset_idle_masks(ops);
+57 -28
tools/sched_ext/include/scx/common.bpf.h
··· 586 586 } 587 587 } 588 588 589 - #define READ_ONCE(x) \ 590 - ({ \ 591 - union { typeof(x) __val; char __c[1]; } __u = \ 592 - { .__c = { 0 } }; \ 593 - __read_once_size(&(x), __u.__c, sizeof(x)); \ 594 - __u.__val; \ 589 + /* 590 + * __unqual_typeof(x) - Declare an unqualified scalar type, leaving 591 + * non-scalar types unchanged, 592 + * 593 + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' 594 + * is not type-compatible with 'signed char', and we define a separate case. 595 + * 596 + * This is copied verbatim from kernel's include/linux/compiler_types.h, but 597 + * with default expression (for pointers) changed from (x) to (typeof(x)0). 598 + * 599 + * This is because LLVM has a bug where for lvalue (x), it does not get rid of 600 + * an extra address_space qualifier, but does in case of rvalue (typeof(x)0). 601 + * Hence, for pointers, we need to create an rvalue expression to get the 602 + * desired type. See https://github.com/llvm/llvm-project/issues/53400. 
603 + */ 604 + #define __scalar_type_to_expr_cases(type) \ 605 + unsigned type : (unsigned type)0, signed type : (signed type)0 606 + 607 + #define __unqual_typeof(x) \ 608 + typeof(_Generic((x), \ 609 + char: (char)0, \ 610 + __scalar_type_to_expr_cases(char), \ 611 + __scalar_type_to_expr_cases(short), \ 612 + __scalar_type_to_expr_cases(int), \ 613 + __scalar_type_to_expr_cases(long), \ 614 + __scalar_type_to_expr_cases(long long), \ 615 + default: (typeof(x))0)) 616 + 617 + #define READ_ONCE(x) \ 618 + ({ \ 619 + union { __unqual_typeof(x) __val; char __c[1]; } __u = \ 620 + { .__c = { 0 } }; \ 621 + __read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \ 622 + __u.__val; \ 595 623 }) 596 624 597 - #define WRITE_ONCE(x, val) \ 598 - ({ \ 599 - union { typeof(x) __val; char __c[1]; } __u = \ 600 - { .__val = (val) }; \ 601 - __write_once_size(&(x), __u.__c, sizeof(x)); \ 602 - __u.__val; \ 603 - }) 604 - 605 - #define READ_ONCE_ARENA(type, x) \ 606 - ({ \ 607 - union { type __val; char __c[1]; } __u = \ 608 - { .__c = { 0 } }; \ 609 - __read_once_size((void *)&(x), __u.__c, sizeof(x)); \ 610 - __u.__val; \ 611 - }) 612 - 613 - #define WRITE_ONCE_ARENA(type, x, val) \ 614 - ({ \ 615 - union { type __val; char __c[1]; } __u = \ 616 - { .__val = (val) }; \ 617 - __write_once_size((void *)&(x), __u.__c, sizeof(x)); \ 618 - __u.__val; \ 625 + #define WRITE_ONCE(x, val) \ 626 + ({ \ 627 + union { __unqual_typeof(x) __val; char __c[1]; } __u = \ 628 + { .__val = (val) }; \ 629 + __write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \ 630 + __u.__val; \ 619 631 }) 620 632 621 633 /* ··· 659 647 else 660 648 return log2_u32(v) + 1; 661 649 } 650 + 651 + /* 652 + * Return a value proportionally scaled to the task's weight. 
653 + */ 654 + static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value) 655 + { 656 + return (value * p->scx.weight) / 100; 657 + } 658 + 659 + /* 660 + * Return a value inversely proportional to the task's weight. 661 + */ 662 + static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value) 663 + { 664 + return value * 100 / p->scx.weight; 665 + } 666 + 662 667 663 668 #include "compat.bpf.h" 664 669 #include "enums.bpf.h"
+3
tools/sched_ext/include/scx/enum_defs.autogen.h
··· 88 88 #define HAVE_SCX_OPS_ENQ_LAST 89 89 #define HAVE_SCX_OPS_ENQ_EXITING 90 90 #define HAVE_SCX_OPS_SWITCH_PARTIAL 91 + #define HAVE_SCX_OPS_ENQ_MIGRATION_DISABLED 92 + #define HAVE_SCX_OPS_ALLOW_QUEUED_WAKEUP 91 93 #define HAVE_SCX_OPS_HAS_CGROUP_WEIGHT 92 94 #define HAVE_SCX_OPS_ALL_FLAGS 93 95 #define HAVE_SCX_OPSS_NONE ··· 106 104 #define HAVE_SCX_RQ_BAL_PENDING 107 105 #define HAVE_SCX_RQ_BAL_KEEP 108 106 #define HAVE_SCX_RQ_BYPASSING 107 + #define HAVE_SCX_RQ_CLK_VALID 109 108 #define HAVE_SCX_RQ_IN_WAKEUP 110 109 #define HAVE_SCX_RQ_IN_BALANCE 111 110 #define HAVE_SCX_TASK_NONE
+24
tools/sched_ext/include/scx/enums.autogen.bpf.h
··· 13 13 const volatile u64 __SCX_SLICE_INF __weak; 14 14 #define SCX_SLICE_INF __SCX_SLICE_INF 15 15 16 + const volatile u64 __SCX_RQ_ONLINE __weak; 17 + #define SCX_RQ_ONLINE __SCX_RQ_ONLINE 18 + 19 + const volatile u64 __SCX_RQ_CAN_STOP_TICK __weak; 20 + #define SCX_RQ_CAN_STOP_TICK __SCX_RQ_CAN_STOP_TICK 21 + 22 + const volatile u64 __SCX_RQ_BAL_PENDING __weak; 23 + #define SCX_RQ_BAL_PENDING __SCX_RQ_BAL_PENDING 24 + 25 + const volatile u64 __SCX_RQ_BAL_KEEP __weak; 26 + #define SCX_RQ_BAL_KEEP __SCX_RQ_BAL_KEEP 27 + 28 + const volatile u64 __SCX_RQ_BYPASSING __weak; 29 + #define SCX_RQ_BYPASSING __SCX_RQ_BYPASSING 30 + 31 + const volatile u64 __SCX_RQ_CLK_VALID __weak; 32 + #define SCX_RQ_CLK_VALID __SCX_RQ_CLK_VALID 33 + 34 + const volatile u64 __SCX_RQ_IN_WAKEUP __weak; 35 + #define SCX_RQ_IN_WAKEUP __SCX_RQ_IN_WAKEUP 36 + 37 + const volatile u64 __SCX_RQ_IN_BALANCE __weak; 38 + #define SCX_RQ_IN_BALANCE __SCX_RQ_IN_BALANCE 39 + 16 40 const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak; 17 41 #define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN 18 42
+8
tools/sched_ext/include/scx/enums.autogen.h
··· 8 8 SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \ 9 9 SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \ 10 10 SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \ 11 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_ONLINE); \ 12 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CAN_STOP_TICK); \ 13 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_PENDING); \ 14 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_KEEP); \ 15 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BYPASSING); \ 16 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CLK_VALID); \ 17 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_WAKEUP); \ 18 + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_BALANCE); \ 11 19 SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \ 12 20 SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \ 13 21 SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \
+2 -1
tools/sched_ext/include/scx/enums.h
··· 14 14 bool res; 15 15 16 16 res = __COMPAT_read_enum(type, name, val); 17 - SCX_BUG_ON(!res, "enum not found(%s)", name); 17 + if (!res) 18 + *val = 0; 18 19 } 19 20 20 21 #define SCX_ENUM_SET(skel, type, name) do { \