// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

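/*
 * How the tunables above are used (derived from the code below): the
 * INTERVALS (8) most recent idle intervals feed the repeating-pattern
 * detector, RESOLUTION and DECAY scale the per-bucket correction factors
 * (a factor of RESOLUTION * DECAY means "the sleep lasted exactly as long
 * as predicted"), and MAX_INTERESTING (50 ms) is the cutoff at and above
 * which a sleep is counted as a perfect prediction.
 */
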
/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are two decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Latency tolerance (from the PM QoS infrastructure)
 * These two factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we take the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a set of
 * factors, not just a single factor. This stems from the realization that the
 * ratio is dependent on the order of magnitude of the expected duration; if we
 * expect 500 milliseconds of idle time the likelihood of getting an interrupt
 * very early is much higher than if we expect 50 microseconds of idle time.
 * For this reason, menu keeps an array of 6 independent factors that gets
 * indexed based on the magnitude of the expected duration.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices like mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and use them to estimate the duration of the next one.
 */
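
/*
 * Worked example of the correction factor (illustrative numbers only):
 * a "unity" factor is RESOLUTION * DECAY = 8192.  If a bucket's factor
 * has converged to 4096 because past sleeps lasted about half of the
 * predicted time, a 1000 us "next timer" estimate is corrected down to
 * 1000 us * 4096 / 8192 = 500 us before an idle state is picked.
 */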

struct menu_device {
	int		needs_update;
	int		tick_wakeup;

	u64		next_timer_ns;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

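/*
 * Map an expected idle duration to one of the BUCKETS correction-factor
 * slots, one slot per decade: [0, 10 us) -> 0, [10 us, 100 us) -> 1, and
 * so on up to [100 ms, inf) -> 5.
 */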
static inline int which_bucket(u64 duration_ns)
{
	int bucket = 0;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

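/*
 * Record an observed idle interval (in microseconds) in the ring buffer
 * feeding the repeating-pattern detector.  Note that menu_select() also
 * passes UINT_MAX here when the driver rejected the previously selected
 * state; get_typical_interval() then discards that slot as out of range.
 */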
static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
	/* Update the repeating-pattern data. */
	data->intervals[data->interval_ptr++] = interval_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	s64 value, min_thresh = -1, max_thresh = UINT_MAX;
	unsigned int max, min, divisor;
	u64 avg, variance, avg_sq;
	int i;

again:
	/* Compute the average and variance of past intervals. */
	max = 0;
	min = UINT_MAX;
	avg = 0;
	variance = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		value = data->intervals[i];
		/*
		 * Discard the samples outside the interval between the min and
		 * max thresholds.
		 */
		if (value <= min_thresh || value >= max_thresh)
			continue;

		divisor++;

		avg += value;
		variance += value * value;

		if (value > max)
			max = value;

		if (value < min)
			min = value;
	}

	if (!max)
		return UINT_MAX;

	if (divisor == INTERVALS) {
		avg >>= INTERVAL_SHIFT;
		variance >>= INTERVAL_SHIFT;
	} else {
		do_div(avg, divisor);
		do_div(variance, divisor);
	}

	avg_sq = avg * avg;
	variance -= avg_sq;

	/*
	 * The typical interval is obtained when the standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or when the standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
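	/*
	 * Illustration with made-up numbers: eight samples of 100 us give
	 * avg = 100 and variance = 0, so the variance <= 400 test below
	 * accepts 100 us as the typical interval.  If one sample were
	 * 10000 us instead, avg would be 1337 and variance roughly 1.07e7,
	 * so avg^2 (about 1.8e6) would not exceed 36 * variance and the
	 * outlier-rejection pass further down would exclude that sample.
	 */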
	if (likely(variance <= U64_MAX/36)) {
		if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
		    variance <= 400)
			return avg;
	}

	/*
	 * If there are outliers, discard them by setting thresholds to exclude
	 * data points at a large enough distance from the average, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the last 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 *
	 * However, if the number of remaining samples is too small to exclude
	 * any more outliers, allow the deepest available idle state to be
	 * selected because there are systems where the time spent by CPUs in
	 * deep idle states is correlated to the maximum frequency the CPUs
	 * can get to. On those systems, shallow idle states should be avoided
	 * unless there is a clear indication that the given CPU is most likely
	 * going to be woken up shortly.
	 */
	if (divisor * 4 <= INTERVALS * 3)
		return UINT_MAX;

	/* Update the thresholds for the next round. */
	if (avg - min > max - avg)
		min_thresh = min;
	else
		max_thresh = max;

	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	u64 predicted_ns;
	ktime_t delta, delta_tick;
	int i, idx;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	} else if (!dev->last_residency_ns) {
		/*
		 * This happens when the driver rejects the previously selected
		 * idle state and returns an error, so update the recent
		 * intervals table to prevent invalid information from being
		 * used going forward.
		 */
		menu_update_intervals(data, UINT_MAX);
	}

	/* Find the shortest expected idle interval. */
	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
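	/*
	 * Note that get_typical_interval() returns UINT_MAX when it does not
	 * detect a repeating pattern, which makes predicted_ns large enough
	 * to pass the threshold check below and defer to the timer-based
	 * estimate.
	 */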
	if (predicted_ns > RESIDENCY_THRESHOLD_NS || tick_nohz_tick_stopped()) {
		unsigned int timer_us;

		/* Determine the time till the closest timer. */
		delta = tick_nohz_get_sleep_length(&delta_tick);
		if (unlikely(delta < 0)) {
			delta = 0;
			delta_tick = 0;
		}

		data->next_timer_ns = delta;
		data->bucket = which_bucket(data->next_timer_ns);

		/*
		 * Scale the next-timer estimate by the bucket's correction
		 * factor, rounding to the nearest microsecond (adding half
		 * of the divisor before dividing rounds instead of
		 * truncating).
		 */
		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
				   data->next_timer_ns *
				   data->correction_factor[data->bucket],
				   RESOLUTION * DECAY * NSEC_PER_USEC);
		/* Use the lowest expected idle interval to pick the idle state. */
		predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is higher because the CPU may get
		 * stuck in a shallow idle state then. To avoid that, if
		 * predicted_ns is small enough, say it might be mispredicted
		 * and use the known time till the closest timer for idle state
		 * selection unless that timer is going to trigger within
		 * SAFE_TIMER_RANGE_NS, in which case it can be regarded as a
		 * sufficient safety net.
		 */
		if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC &&
		    data->next_timer_ns > SAFE_TIMER_RANGE_NS)
			predicted_ns = data->next_timer_ns;
	} else {
		/*
		 * Because the next timer event is not going to be determined
		 * in this case, assume that without the tick the closest timer
		 * will be in distant future and that the closest tick will occur
		 * after 1/2 of the tick period.
		 */
		data->next_timer_ns = KTIME_MAX;
		delta_tick = TICK_NSEC / 2;
		data->bucket = BUCKETS - 1;
	}

	if (latency_req == 0 ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->exit_latency_ns > latency_req)
			break;

		if (s->target_residency_ns <= predicted_ns) {
			idx = i;
			continue;
		}

		/*
		 * Use a physical idle state instead of busy polling so long as
		 * its target residency is below the residency threshold, its
		 * exit latency is not greater than the predicted idle duration,
		 * and the next timer doesn't expire soon.
		 */
		if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
		    s->target_residency_ns < RESIDENCY_THRESHOLD_NS &&
		    s->target_residency_ns <= data->next_timer_ns &&
		    s->exit_latency_ns <= predicted_ns) {
			predicted_ns = s->target_residency_ns;
			idx = i;
			break;
		}

		if (predicted_ns < TICK_NSEC)
			break;

		if (!tick_nohz_tick_stopped()) {
			/*
			 * If the state selected so far is shallow, waking up
			 * early won't hurt, so retain the tick in that case and
			 * let the governor run again in the next iteration of
			 * the idle loop.
			 */
			predicted_ns = drv->states[idx].target_residency_ns;
			break;
		}

		/*
		 * If the state selected so far is shallow and this state's
		 * target residency matches the time till the closest timer
		 * event, select this one to avoid getting stuck in the shallow
		 * one for too long.
		 */
		if (drv->states[idx].target_residency_ns < TICK_NSEC &&
		    s->target_residency_ns <= delta_tick)
			idx = i;

		return idx;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured time anyway if it is short, and if it is
	 * long, truncate it to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

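	/*
	 * The update below implements a decaying average: each pass keeps
	 * (DECAY - 1)/DECAY of the old factor and adds at most RESOLUTION
	 * scaled by the observed ratio, so the factor converges to
	 * RESOLUTION * DECAY * (measured / predicted), which is exactly the
	 * scale that menu_select() divides by when applying it.
	 */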
	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_ns values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	menu_update_intervals(data, ktime_to_us(measured_ns));
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. on first-time init or after CPU
	 * hotplug), we actually want to start out with a unity factor
	 * (RESOLUTION * DECAY).
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);