Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

cpuset: move update_domain_attr_tree to cpuset_v1.c

Since relax_domain_level is only applicable to v1, move
update_domain_attr_tree() to cpuset-v1.c, which solely updates
relax_domain_level.

Additionally, relax_domain_level is now initialized in cpuset1_init().
Accordingly, the initialization of relax_domain_level in top_cpuset is
removed. The unnecessary remote_partition initialization in top_cpuset
is also cleaned up.

As a result, relax_domain_level can be defined in cpuset only when
CONFIG_CPUSETS_V1=y.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by

Chen Ridong and committed by
Tejun Heo
cb33f881 4ef42c64

+36 -34
+8 -3
kernel/cgroup/cpuset-internal.h
··· 150 150 */ 151 151 int attach_in_progress; 152 152 153 - /* for custom sched domain */ 154 - int relax_domain_level; 155 - 156 153 /* partition root state */ 157 154 int partition_root_state; 158 155 ··· 179 182 180 183 #ifdef CONFIG_CPUSETS_V1 181 184 struct fmeter fmeter; /* memory_pressure filter */ 185 + 186 + /* for custom sched domain */ 187 + int relax_domain_level; 182 188 #endif 183 189 }; 184 190 ··· 296 296 int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial); 297 297 void cpuset1_init(struct cpuset *cs); 298 298 void cpuset1_online_css(struct cgroup_subsys_state *css); 299 + void update_domain_attr_tree(struct sched_domain_attr *dattr, 300 + struct cpuset *root_cs); 299 301 #else 300 302 static inline void cpuset1_update_task_spread_flags(struct cpuset *cs, 301 303 struct task_struct *tsk) {} ··· 309 307 struct cpuset *trial) { return 0; } 310 308 static inline void cpuset1_init(struct cpuset *cs) {} 311 309 static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {} 310 + static inline void update_domain_attr_tree(struct sched_domain_attr *dattr, 311 + struct cpuset *root_cs) {} 312 + 312 313 #endif /* CONFIG_CPUSETS_V1 */ 313 314 314 315 #endif /* __CPUSET_INTERNAL_H */
+28
kernel/cgroup/cpuset-v1.c
··· 502 502 void cpuset1_init(struct cpuset *cs) 503 503 { 504 504 fmeter_init(&cs->fmeter); 505 + cs->relax_domain_level = -1; 505 506 } 506 507 507 508 void cpuset1_online_css(struct cgroup_subsys_state *css) ··· 551 550 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 552 551 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 553 552 cpuset_callback_unlock_irq(); 553 + } 554 + 555 + static void 556 + update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 557 + { 558 + if (dattr->relax_domain_level < c->relax_domain_level) 559 + dattr->relax_domain_level = c->relax_domain_level; 560 + } 561 + 562 + void update_domain_attr_tree(struct sched_domain_attr *dattr, 563 + struct cpuset *root_cs) 564 + { 565 + struct cpuset *cp; 566 + struct cgroup_subsys_state *pos_css; 567 + 568 + rcu_read_lock(); 569 + cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 570 + /* skip the whole subtree if @cp doesn't have any CPU */ 571 + if (cpumask_empty(cp->cpus_allowed)) { 572 + pos_css = css_rightmost_descendant(pos_css); 573 + continue; 574 + } 575 + 576 + if (is_sched_load_balance(cp)) 577 + update_domain_attr(dattr, cp); 578 + } 579 + rcu_read_unlock(); 554 580 } 555 581 556 582 /*
-31
kernel/cgroup/cpuset.c
··· 215 215 .flags = BIT(CS_CPU_EXCLUSIVE) | 216 216 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE), 217 217 .partition_root_state = PRS_ROOT, 218 - .relax_domain_level = -1, 219 - .remote_partition = false, 220 218 }; 221 219 222 220 /* ··· 751 753 static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 752 754 { 753 755 return cpumask_intersects(a->effective_cpus, b->effective_cpus); 754 - } 755 - 756 - static void 757 - update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 758 - { 759 - if (dattr->relax_domain_level < c->relax_domain_level) 760 - dattr->relax_domain_level = c->relax_domain_level; 761 - return; 762 - } 763 - 764 - static void update_domain_attr_tree(struct sched_domain_attr *dattr, 765 - struct cpuset *root_cs) 766 - { 767 - struct cpuset *cp; 768 - struct cgroup_subsys_state *pos_css; 769 - 770 - rcu_read_lock(); 771 - cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 772 - /* skip the whole subtree if @cp doesn't have any CPU */ 773 - if (cpumask_empty(cp->cpus_allowed)) { 774 - pos_css = css_rightmost_descendant(pos_css); 775 - continue; 776 - } 777 - 778 - if (is_sched_load_balance(cp)) 779 - update_domain_attr(dattr, cp); 780 - } 781 - rcu_read_unlock(); 782 756 } 783 757 784 758 /* Must be called with cpuset_mutex held. */ ··· 3573 3603 3574 3604 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 3575 3605 cpuset1_init(cs); 3576 - cs->relax_domain_level = -1; 3577 3606 3578 3607 /* Set CS_MEMORY_MIGRATE for default hierarchy */ 3579 3608 if (cpuset_v2())