Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

sched/fair: Cleanup fair_server

The throttle interaction made my brain hurt, make it consistently
about 0 transitions of h_nr_running.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

+17 -15
kernel/sched/fair.c
kernel/sched/fair.c (+17 -15)

··· (throttle path, around the "done:" label)
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, task_delta);

-done:
 	/* Stop the fair server if throttling resulted in no runnable tasks */
 	if (rq_h_nr_running && !rq->cfs.h_nr_running)
 		dl_server_stop(&rq->fair_server);
+done:
 	/*
 	 * Note: distribution will already see us throttled via the
 	 * throttled-list. rq->lock protects completion.

··· (unthrottle path, around "unthrottle_throttle:")
 		goto unthrottle_throttle;
 	}

+	/* Start the fair server if un-throttling resulted in new runnable tasks */
+	if (!rq_h_nr_running && rq->cfs.h_nr_running)
+		dl_server_start(&rq->fair_server);
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, task_delta);

 unthrottle_throttle:
 	assert_list_leaf_cfs_rq(rq);
-
-	/* Start the fair server if un-throttling resulted in new runnable tasks */
-	if (!rq_h_nr_running && rq->cfs.h_nr_running)
-		dl_server_start(&rq->fair_server);

 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)

··· (enqueue path: snapshot h_nr_running before the enqueue)
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int rq_h_nr_running = rq->cfs.h_nr_running;

 	/*
 	 * The code below (indirectly) updates schedutil which looks at
···
 	 * estimated utilization, before we update schedutil.
 	 */
 	util_est_enqueue(&rq->cfs, p);
-
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) {
-		/* Account for idle runtime */
-		if (!rq->nr_running)
-			dl_server_update_idle_time(rq, rq->curr);
-		dl_server_start(&rq->fair_server);
-	}

 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
···
 		goto enqueue_throttle;
 	}

+	if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+		/* Account for idle runtime */
+		if (!rq->nr_running)
+			dl_server_update_idle_time(rq, rq->curr);
+		dl_server_start(&rq->fair_server);
+	}
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, 1);

··· (dequeue path: snapshot h_nr_running before the dequeue)
 	int task_sleep = flags & DEQUEUE_SLEEP;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
+	int rq_h_nr_running = rq->cfs.h_nr_running;

 	util_est_dequeue(&rq->cfs, p);

···
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, 1);

+	if (rq_h_nr_running && !rq->cfs.h_nr_running)
+		dl_server_stop(&rq->fair_server);
+
 	/* balance early to pull high priority tasks */
 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
 		rq->next_balance = jiffies;

 dequeue_throttle:
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running)
-		dl_server_stop(&rq->fair_server);
-
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }