Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
"Various fixlets, mostly related to the (root-only) SCHED_DEADLINE
policy, but also a hotplug bug fix and a fix for a NR_CPUS related
overallocation bug causing a suspend/resume regression"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix hotplug vs. set_cpus_allowed_ptr()
sched/cpupri: Replace NR_CPUS arrays
sched/deadline: Replace NR_CPUS arrays
sched/deadline: Restrict user params max value to 2^63 ns
sched/deadline: Change sched_getparam() behaviour vs SCHED_DEADLINE
sched: Disallow sched_attr::sched_policy < 0
sched: Make sched_setattr() correctly return -EFBIG

+78 -31
+4 -2
kernel/cpu.c
···
 
 void set_cpu_online(unsigned int cpu, bool online)
 {
-	if (online)
+	if (online) {
 		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
-	else
+		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+	} else {
 		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+	}
 }
 
 void set_cpu_active(unsigned int cpu, bool active)
+39 -16
kernel/sched/core.c
···
 	 * We ask for the deadline not being zero, and greater or equal
 	 * than the runtime, as well as the period of being zero or
 	 * greater than deadline. Furthermore, we have to be sure that
-	 * user parameters are above the internal resolution (1us); we
-	 * check sched_runtime only since it is always the smaller one.
+	 * user parameters are above the internal resolution of 1us (we
+	 * check sched_runtime only since it is always the smaller one) and
+	 * below 2^63 ns (we have to check both sched_deadline and
+	 * sched_period, as the latter can be zero).
 	 */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-	return attr && attr->sched_deadline != 0 &&
-		(attr->sched_period == 0 ||
-		(s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
-		(s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 &&
-		attr->sched_runtime >= (2 << (DL_SCALE - 1));
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
 }
 
 /*
···
 	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
-	if (sched_copy_attr(uattr, &attr))
-		return -EFAULT;
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if (attr.sched_policy < 0)
+		return -EINVAL;
 
 	rcu_read_lock();
 	retval = -ESRCH;
···
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
 	struct task_struct *p;
 	int retval;
···
 	if (retval)
 		goto out_unlock;
 
-	if (task_has_dl_policy(p)) {
-		retval = -EINVAL;
-		goto out_unlock;
-	}
-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
 	rcu_read_unlock();
 
 	/*
···
 				      unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
+24 -9
kernel/sched/cpudeadline.c
···
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
···
 {
 	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-	swap(cp->elements[a], cp->elements[b]);
-	swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+	swap(cp->elements[a].cpu, cp->elements[b].cpu);
+	swap(cp->elements[a].dl , cp->elements[b].dl );
+
+	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
···
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-	old_idx = cp->cpu_to_idx[cpu];
+	old_idx = cp->elements[cpu].idx;
 	if (!is_valid) {
 		/* remove item */
 		if (old_idx == IDX_INVALID) {
···
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
 		cp->size--;
-		cp->cpu_to_idx[new_cpu] = old_idx;
-		cp->cpu_to_idx[cpu] = IDX_INVALID;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
 		while (old_idx > 0 && dl_time_before(
 				cp->elements[parent(old_idx)].dl,
 				cp->elements[old_idx].dl)) {
···
 		cp->size++;
 		cp->elements[cp->size - 1].dl = 0;
 		cp->elements[cp->size - 1].cpu = cpu;
-		cp->cpu_to_idx[cpu] = cp->size - 1;
+		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
 		cpumask_clear_cpu(cpu, cp->free_cpus);
 	} else {
···
 	memset(cp, 0, sizeof(*cp));
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
-	for (i = 0; i < NR_CPUS; i++)
-		cp->cpu_to_idx[i] = IDX_INVALID;
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+	cp->elements = kcalloc(nr_cpu_ids,
+			       sizeof(struct cpudl_item),
+			       GFP_KERNEL);
+	if (!cp->elements)
 		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+		kfree(cp->elements);
+		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i)
+		cp->elements[i].idx = IDX_INVALID;
+
 	cpumask_setall(cp->free_cpus);
 
 	return 0;
···
 void cpudl_cleanup(struct cpudl *cp)
 {
 	free_cpumask_var(cp->free_cpus);
+	kfree(cp->elements);
 }
+3 -3
kernel/sched/cpudeadline.h
···
 
 #define IDX_INVALID -1
 
-struct array_item {
+struct cpudl_item {
 	u64 dl;
 	int cpu;
+	int idx;
 };
 
 struct cpudl {
 	raw_spinlock_t lock;
 	int size;
-	int cpu_to_idx[NR_CPUS];
-	struct array_item elements[NR_CPUS];
 	cpumask_var_t free_cpus;
+	struct cpudl_item *elements;
 };
+7
kernel/sched/cpupri.c
···
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/slab.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
···
 		goto cleanup;
 	}
 
+	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+	if (!cp->cpu_to_pri)
+		goto cleanup;
+
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+
 	return 0;
 
 cleanup:
···
 {
 	int i;
 
+	kfree(cp->cpu_to_pri);
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+1 -1
kernel/sched/cpupri.h
···
 
 struct cpupri {
 	struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
-	int cpu_to_pri[NR_CPUS];
+	int *cpu_to_pri;
 };
 
 #ifdef CONFIG_SMP