Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

cpuset: remove v1-specific code from generate_sched_domains

Following the introduction of cpuset1_generate_sched_domains() for v1
in the previous patch, v1-specific logic can now be removed from the
generic generate_sched_domains(). This patch cleans up the v1-only
code and ensures uf_node is only visible when CONFIG_CPUSETS_V1=y.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

Authored by Chen Ridong and committed by Tejun Heo
7cc17205 6e1d31ce

+28 -129
+3 -7
kernel/cgroup/cpuset-internal.h
··· 175 175 /* Handle for cpuset.cpus.partition */ 176 176 struct cgroup_file partition_file; 177 177 178 - /* Used to merge intersecting subsets for generate_sched_domains */ 179 - struct uf_node node; 180 - 181 178 #ifdef CONFIG_CPUSETS_V1 182 179 struct fmeter fmeter; /* memory_pressure filter */ 183 180 184 181 /* for custom sched domain */ 185 182 int relax_domain_level; 183 + 184 + /* Used to merge intersecting subsets for generate_sched_domains */ 185 + struct uf_node node; 186 186 #endif 187 187 }; 188 188 ··· 314 314 int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial); 315 315 void cpuset1_init(struct cpuset *cs); 316 316 void cpuset1_online_css(struct cgroup_subsys_state *css); 317 - void update_domain_attr_tree(struct sched_domain_attr *dattr, 318 - struct cpuset *root_cs); 319 317 int cpuset1_generate_sched_domains(cpumask_var_t **domains, 320 318 struct sched_domain_attr **attributes); 321 319 ··· 328 330 struct cpuset *trial) { return 0; } 329 331 static inline void cpuset1_init(struct cpuset *cs) {} 330 332 static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {} 331 - static inline void update_domain_attr_tree(struct sched_domain_attr *dattr, 332 - struct cpuset *root_cs) {} 333 333 static inline int cpuset1_generate_sched_domains(cpumask_var_t **domains, 334 334 struct sched_domain_attr **attributes) { return 0; }; 335 335
+1 -1
kernel/cgroup/cpuset-v1.c
··· 560 560 dattr->relax_domain_level = c->relax_domain_level; 561 561 } 562 562 563 - void update_domain_attr_tree(struct sched_domain_attr *dattr, 563 + static void update_domain_attr_tree(struct sched_domain_attr *dattr, 564 564 struct cpuset *root_cs) 565 565 { 566 566 struct cpuset *cp;
+24 -121
kernel/cgroup/cpuset.c
··· 789 789 { 790 790 struct cpuset *cp; /* top-down scan of cpusets */ 791 791 struct cpuset **csa; /* array of all cpuset ptrs */ 792 - int csn; /* how many cpuset ptrs in csa so far */ 793 792 int i, j; /* indices for partition finding loops */ 794 793 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ 795 794 struct sched_domain_attr *dattr; /* attributes for custom domains */ 796 795 int ndoms = 0; /* number of sched domains in result */ 797 - int nslot; /* next empty doms[] struct cpumask slot */ 798 796 struct cgroup_subsys_state *pos_css; 799 - bool root_load_balance = is_sched_load_balance(&top_cpuset); 800 - bool cgrpv2 = cpuset_v2(); 801 - int nslot_update; 802 797 803 - if (!cgrpv2) 798 + if (!cpuset_v2()) 804 799 return cpuset1_generate_sched_domains(domains, attributes); 805 800 806 801 doms = NULL;
··· 803 808 csa = NULL; 804 809 805 810 /* Special case for the 99% of systems with one, full, sched domain */ 806 - if (root_load_balance && cpumask_empty(subpartitions_cpus)) { 807 - single_root_domain: 811 + if (cpumask_empty(subpartitions_cpus)) { 808 812 ndoms = 1; 809 - doms = alloc_sched_domains(ndoms); 810 - if (!doms) 811 - goto done; 812 - 813 - dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 814 - if (dattr) { 815 - *dattr = SD_ATTR_INIT; 816 - update_domain_attr_tree(dattr, &top_cpuset); 817 - } 818 - cpumask_and(doms[0], top_cpuset.effective_cpus, 819 - housekeeping_cpumask(HK_TYPE_DOMAIN)); 820 - 821 - goto done; 813 + /* !csa will be checked and can be correctly handled */ 814 + goto generate_doms; 822 815 } 823 816 824 817 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); 825 818 if (!csa) 826 819 goto done; 827 - csn = 0; 828 820 821 + /* Find how many partitions and cache them to csa[] */ 829 822 rcu_read_lock(); 830 - if (root_load_balance) 831 - csa[csn++] = &top_cpuset; 832 823 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { 833 - if (cp == &top_cpuset) 834 - continue; 835 - 836 - if (cgrpv2) 837 - goto v2; 838 - 839 - /* 840 - * v1: 841 - * Continue traversing beyond @cp iff @cp has some CPUs and 842 - * isn't load balancing. The former is obvious. The 843 - * latter: All child cpusets contain a subset of the 844 - * parent's cpus, so just skip them, and then we call 845 - * update_domain_attr_tree() to calc relax_domain_level of 846 - * the corresponding sched domain. 847 - */ 848 - if (!cpumask_empty(cp->cpus_allowed) && 849 - !(is_sched_load_balance(cp) && 850 - cpumask_intersects(cp->cpus_allowed, 851 - housekeeping_cpumask(HK_TYPE_DOMAIN)))) 852 - continue; 853 - 854 - if (is_sched_load_balance(cp) && 855 - !cpumask_empty(cp->effective_cpus)) 856 - csa[csn++] = cp; 857 - 858 - /* skip @cp's subtree */ 859 - pos_css = css_rightmost_descendant(pos_css); 860 - continue; 861 - 862 - v2: 863 824 /* 864 825 * Only valid partition roots that are not isolated and with 865 - * non-empty effective_cpus will be saved into csn[]. 826 + * non-empty effective_cpus will be saved into csa[]. 866 827 */ 867 828 if ((cp->partition_root_state == PRS_ROOT) && 868 829 !cpumask_empty(cp->effective_cpus)) 869 - csa[csn++] = cp; 830 + csa[ndoms++] = cp; 870 831 871 832 /* 872 833 * Skip @cp's subtree if not a partition root and has no
··· 833 882 } 834 883 } 835 884 rcu_read_unlock(); 836 - /* 837 - * If there are only isolated partitions underneath the cgroup root, 838 - * we can optimize out unneeded sched domains scanning. 839 - */ 840 - if (root_load_balance && (csn == 1)) 841 - goto single_root_domain; 842 - 843 - for (i = 0; i < csn; i++) 844 - uf_node_init(&csa[i]->node); 845 - 846 - /* Merge overlapping cpusets */ 847 - for (i = 0; i < csn; i++) { 848 - for (j = i + 1; j < csn; j++) { 849 - if (cpusets_overlap(csa[i], csa[j])) { 885 + for (i = 0; i < ndoms; i++) { 886 + for (j = i + 1; j < ndoms; j++) { 887 + if (cpusets_overlap(csa[i], csa[j])) 850 888 /* 851 889 * Cgroup v2 shouldn't pass down overlapping 852 890 * partition root cpusets. 853 891 */ 854 - WARN_ON_ONCE(cgrpv2); 855 - uf_union(&csa[i]->node, &csa[j]->node); 856 - } 892 + WARN_ON_ONCE(1); 857 893 } 858 894 } 859 895 860 - /* Count the total number of domains */ 861 - for (i = 0; i < csn; i++) { 862 - if (uf_find(&csa[i]->node) == &csa[i]->node) 863 - ndoms++; 864 - } 865 - 866 - /* 867 - * Now we know how many domains to create. 868 - * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 869 - */ 896 + generate_doms: 870 897 doms = alloc_sched_domains(ndoms); 871 898 if (!doms) 872 899 goto done;
··· 861 932 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a 862 933 * subset of HK_TYPE_DOMAIN housekeeping CPUs. 863 934 */ 864 - if (cgrpv2) { 865 - for (i = 0; i < ndoms; i++) { 866 - /* 867 - * The top cpuset may contain some boot time isolated 868 - * CPUs that need to be excluded from the sched domain. 869 - */ 870 - if (csa[i] == &top_cpuset) 871 - cpumask_and(doms[i], csa[i]->effective_cpus, 872 - housekeeping_cpumask(HK_TYPE_DOMAIN)); 873 - else 874 - cpumask_copy(doms[i], csa[i]->effective_cpus); 875 - if (dattr) 876 - dattr[i] = SD_ATTR_INIT; 877 - } 878 - goto done; 935 + for (i = 0; i < ndoms; i++) { 936 + /* 937 + * The top cpuset may contain some boot time isolated 938 + * CPUs that need to be excluded from the sched domain. 939 + */ 940 + if (!csa || csa[i] == &top_cpuset) 941 + cpumask_and(doms[i], top_cpuset.effective_cpus, 942 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 943 + else 944 + cpumask_copy(doms[i], csa[i]->effective_cpus); 945 + if (dattr) 946 + dattr[i] = SD_ATTR_INIT; 879 947 } 880 - 881 - for (nslot = 0, i = 0; i < csn; i++) { 882 - nslot_update = 0; 883 - for (j = i; j < csn; j++) { 884 - if (uf_find(&csa[j]->node) == &csa[i]->node) { 885 - struct cpumask *dp = doms[nslot]; 886 - 887 - if (i == j) { 888 - nslot_update = 1; 889 - cpumask_clear(dp); 890 - if (dattr) 891 - *(dattr + nslot) = SD_ATTR_INIT; 892 - } 893 - cpumask_or(dp, dp, csa[j]->effective_cpus); 894 - cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); 895 - if (dattr) 896 - update_domain_attr_tree(dattr + nslot, csa[j]); 897 - } 898 - } 899 - if (nslot_update) 900 - nslot++; 901 - } 902 - BUG_ON(nslot != ndoms); 903 948 904 949 done: 905 950 kfree(csa);