include/linux/sched.h (+1 -0)
@@ -1406,6 +1406,7 @@
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_stat_granularity;
 extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_compat_yield;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 
kernel/sched.c (+6 -4)
@@ -1682,6 +1682,11 @@
 
 	p->prio = effective_prio(p);
 
+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
 	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
 			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
 			!current->se.on_rq) {
@@ -4550,10 +4555,7 @@
 	struct rq *rq = this_rq_lock();
 
 	schedstat_inc(rq, yld_cnt);
-	if (unlikely(rq->nr_running == 1))
-		schedstat_inc(rq, yld_act_empty);
-	else
-		current->sched_class->yield_task(rq, current);
+	current->sched_class->yield_task(rq, current);
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
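Taken together, the kernel/sched.c hunks make the class choice explicit and push policy out of the syscall: wake_up_new_task() now attaches the child to rt_sched_class or fair_sched_class based on its effective priority, and sys_sched_yield() always delegates to the class's yield_task() hook, dropping the yld_act_empty shortcut so the single-runner case can be handled inside the fair class instead (see the nr_running check added below). For reference, here is a sketch of the rt_prio() predicate the new branch relies on, as declared in include/linux/sched.h of this kernel generation; it is reproduced for illustration and is not part of the patch:

/*
 * Illustration only: realtime priorities occupy the range
 * below MAX_RT_PRIO, everything else falls through to the
 * fair class.
 */
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}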
kernel/sched_fair.c (+57 -6)
@@ -43,6 +43,14 @@
 unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
 
 /*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the aggressive yield implementation of the
+ * old scheduler back on.
+ */
+unsigned int __read_mostly sysctl_sched_compat_yield;
+
+/*
  * SCHED_BATCH wake-up granularity.
  * (default: 25 msec, units: nanoseconds)
  *
@@ -897,19 +905,62 @@
 }
 
 /*
- * sched_yield() support is very simple - we dequeue and enqueue
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
  */
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct sched_entity *rightmost, *se = &p->se;
+	struct rb_node *parent;
 
-	__update_rq_clock(rq);
 	/*
-	 * Dequeue and enqueue the task to update its
-	 * position within the tree:
+	 * Are we the only task in the tree?
 	 */
-	dequeue_entity(cfs_rq, &p->se, 0);
-	enqueue_entity(cfs_rq, &p->se, 0);
+	if (unlikely(cfs_rq->nr_running == 1))
+		return;
+
+	if (likely(!sysctl_sched_compat_yield)) {
+		__update_rq_clock(rq);
+		/*
+		 * Dequeue and enqueue the task to update its
+		 * position within the tree:
+		 */
+		dequeue_entity(cfs_rq, &p->se, 0);
+		enqueue_entity(cfs_rq, &p->se, 0);
+
+		return;
+	}
+	/*
+	 * Find the rightmost entry in the rbtree:
+	 */
+	do {
+		parent = *link;
+		link = &parent->rb_right;
+	} while (*link);
+
+	rightmost = rb_entry(parent, struct sched_entity, run_node);
+	/*
+	 * Already in the rightmost position?
+	 */
+	if (unlikely(rightmost == se))
+		return;
+
+	/*
+	 * Minimally necessary key value to be last in the tree:
+	 */
+	se->fair_key = rightmost->fair_key + 1;
+
+	if (cfs_rq->rb_leftmost == &se->run_node)
+		cfs_rq->rb_leftmost = rb_next(&se->run_node);
+	/*
+	 * Relink the task to the rightmost position:
+	 */
+	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+	rb_link_node(&se->run_node, parent, link);
+	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
 /*
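With the option off, yield_task_fair() keeps the CFS behaviour: a dequeue/enqueue pair that merely refreshes the task's position in the timeline. With the option on, the task is forced to the rightmost spot of the rbtree by giving it a key one larger than the current rightmost entity's, after advancing the rb_leftmost cache so it never points at the node being erased. A minimal user-space sketch for flipping the option at run time follows; it assumes the kernel/sysctl.c part of this patch (not shown in this excerpt) exposes the variable as /proc/sys/kernel/sched_compat_yield:

#include <stdio.h>

int main(void)
{
	/* assumed sysctl path, registered by the kernel/sysctl.c hunk */
	FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "w");

	if (!f) {
		perror("sched_compat_yield");
		return 1;
	}
	fputs("1\n", f);	/* 1 = old aggressive yield, 0 = CFS default */
	return fclose(f) ? 1 : 0;
}

From a root shell, echo 1 > /proc/sys/kernel/sched_compat_yield has the same effect.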