// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

/* Provides a unique ID for each genpd device */
static DEFINE_IDA(genpd_ida);

/* The bus for genpd_providers. */
static const struct bus_type genpd_provider_bus_type = {
	.name = "genpd_provider",
};

/* The parent for genpd_provider devices. */
static struct device genpd_provider_bus = {
	.init_name = "genpd_provider",
};

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
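
/*
 * Editorial note: GENPD_DEV_CALLBACK() expands to a statement expression that
 * invokes the named per-device callback from genpd->dev_ops when it is set,
 * and otherwise evaluates to (type)0. For instance, a call such as:
 *
 *	ret = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *
 * behaves like "genpd->dev_ops.stop ? genpd->dev_ops.stop(dev) : 0", as the
 * genpd_stop_dev() and genpd_start_dev() wrappers below illustrate.
 */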

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
}

static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
				       int depth)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
	genpd->raw_lock_flags = flags;
}

static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
	return 0;
}

static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->raw_slock)
{
	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}

static const struct genpd_lock_ops genpd_raw_spin_ops = {
	.lock = genpd_lock_raw_spin,
	.lock_nested = genpd_lock_nested_raw_spin,
	.lock_interruptible = genpd_lock_interruptible_raw_spin,
	.unlock = genpd_unlock_raw_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
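
/*
 * Editorial note: the lock implementation is selected per domain, so callers
 * always go through the indirection above, e.g.:
 *
 *	genpd_lock(genpd);
 *	// ... inspect or update genpd state ...
 *	genpd_unlock(genpd);
 *
 * For a GENPD_FLAG_IRQ_SAFE domain this resolves to the spinlock-based ops,
 * which makes the helpers usable from atomic context.
 */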

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd)	(genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
#define genpd_is_no_stay_on(genpd)	(genpd->flags & GENPD_FLAG_NO_STAY_ON)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      dev_name(&genpd->dev));

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is ON, the domain has just come out of an off
	 * state, so account the elapsed time as idle time for the previous
	 * state; otherwise account it as on time.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}

static void genpd_reflect_residency(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct genpd_power_state *state, *next_state;
	unsigned int state_idx;
	s64 sleep_ns, target_ns;

	if (!gd || !gd->reflect_residency)
		return;

	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
	state_idx = genpd->state_idx;
	state = &genpd->states[state_idx];
	target_ns = state->power_off_latency_ns + state->residency_ns;

	if (sleep_ns < target_ns) {
		state->above++;
	} else if (state_idx < (genpd->state_count - 1)) {
		next_state = &genpd->states[state_idx + 1];
		target_ns = next_state->power_off_latency_ns +
			    next_state->residency_ns;

		if (sleep_ns >= target_ns)
			state->below++;
	}

	gd->reflect_residency = false;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* The new requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* The new requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	the device doesn't have any performance state constraints left (and so
 *	the device no longer participates in determining the target performance
 *	state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
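
/*
 * Editorial sketch (hypothetical consumer, error handling abbreviated):
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 *	if (ret)
 *		dev_err(dev, "failed to set performance state: %d\n", ret);
 *
 *	// ... and once no constraint remains, drop the vote:
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */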

/**
 * dev_pm_genpd_set_next_wakeup - Notify the PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of their next wakeup. It's assumed
 * that the users guarantee that the genpd wouldn't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
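
/*
 * Editorial sketch: a driver that can predict its next interrupt may hint the
 * governor, assuming "irq_ns" is an estimate computed by the driver:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ns(ktime_get(), irq_ns));
 */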

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device at runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
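
/*
 * Editorial sketch: toggling HW control of the domain for a device, treating
 * providers without support as a non-fatal case:
 *
 *	ret = dev_pm_genpd_set_hwmode(dev, true);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 */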

/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of the device from the SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);

/**
 * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
 *
 * @dev: Device for which the PM domain may need to stay on.
 * @on: Value to set or unset for the condition.
 *
 * For some usecases a consumer driver requires its device to remain powered on
 * from the PM domain perspective during runtime. This function allows the
 * behaviour to be dynamically controlled for a device attached to a genpd.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	genpd_lock(genpd);
	dev_gpd_data(dev)->rpm_always_on = on;
	genpd_unlock(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
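
/*
 * Editorial sketch: a hypothetical consumer keeps its domain powered across a
 * critical section and then releases the constraint:
 *
 *	dev_pm_genpd_rpm_always_on(dev, true);
 *	// ... section that must not lose domain power ...
 *	dev_pm_genpd_rpm_always_on(dev, false);
 */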

/**
 * dev_pm_genpd_is_on() - Get device's current power domain status
 *
 * @dev: Device to get the current power status for
 *
 * This function checks whether the generic power domain associated with the
 * given device is on, i.e. whether its status equals GENPD_STATE_ON.
 *
 * Note: this function returns the power status of the genpd at the time of the
 * call. The power status may change afterwards due to activity from other
 * devices sharing the same genpd. Therefore, this information should not be
 * relied upon for long-term decisions about the device power state.
 *
 * Return: 'true' if the device's power domain is on, 'false' otherwise.
 */
bool dev_pm_genpd_is_on(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool is_on;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return false;

	genpd_lock(genpd);
	is_on = genpd_status_on(genpd);
	genpd_unlock(genpd);

	return is_on;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_is_on);
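
/*
 * Editorial sketch: the returned status is advisory only, since the domain may
 * be powered off right after the call returns:
 *
 *	if (dev_pm_genpd_is_on(dev))
 *		dev_dbg(dev, "PM domain is currently powered on\n");
 */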

/**
 * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
 *
 * @genpd: The PM domain the idle-state belongs to.
 * @state_idx: The index of the idle-state that failed.
 *
 * In some special cases the ->power_off() callback is asynchronously powering
 * off the PM domain, which means it may return zero to indicate success even
 * though the actual power-off could fail. To account for this correctly in
 * the rejected/usage counts for the idle-state statistics, users can call this
 * function to adjust the values.
 *
 * It is assumed that the users guarantee that the genpd doesn't get removed
 * while this routine is getting called.
 */
void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
			   unsigned int state_idx)
{
	genpd_lock(genpd);
	genpd->states[state_idx].rejected++;
	genpd->states[state_idx].usage--;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			    unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * The domain is already in the "power off" state.
	 * System suspend is in progress.
	 * The domain is configured as always on.
	 * The domain was on at boot and still needs to stay on.
	 * The domain has a subdomain being powered on.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
	    genpd->stay_on || atomic_read(&genpd->sd_count) > 0)
		return;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;

		/* The device may need its PM domain to stay powered on. */
		if (to_gpd_data(pdd)->rpm_always_on)
			return;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return;

	if (_genpd_power_off(genpd, true)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/* Reflect over the entered idle-states residency for debugfs. */
	genpd_reflect_residency(genpd);

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
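
/*
 * Editorial note: booting with "pd_ignore_unused" on the kernel command line
 * keeps otherwise-unused power domains powered, which can help when debugging
 * a driver that misbehaves once genpd_power_off_unused() below turns its
 * domain off.
 */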

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	pr_info("genpd: Disabling unused power domains\n");
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	if (genpd->gov && genpd->gov->system_power_down_ok) {
		if (!genpd->gov->system_power_down_ok(&genpd->domain))
			return;
	} else {
		/* Default to the deepest state. */
		genpd->state_idx = genpd->state_count - 1;
	}

	if (_genpd_power_off(genpd, false)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	} else {
		genpd->states[genpd->state_idx].usage++;

		/*
		 * The ->system_power_down_ok() callback is currently used only
		 * for s2idle. Use it to know when to update the usage counter.
		 */
		if (genpd->gov && genpd->gov->system_power_down_ok)
			genpd->states[genpd->state_idx].usage_s2idle++;
	}

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);
	genpd->prepared_count++;
	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 * I/O pm domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd) &&
	    !device_out_band_wakeup(dev))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd) &&
	    !device_out_band_wakeup(dev))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 * I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
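
/*
 * Editorial sketch: syscore-style users pair the two calls, e.g. for a
 * hypothetical timekeeping device attached to a genpd:
 *
 *	dev_pm_genpd_suspend(timer_dev);	// syscore suspend phase
 *	// ...
 *	dev_pm_genpd_resume(timer_dev);		// syscore resume phase
 */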
1782
1783/**
1784 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1785 * @dev: The device that is attached to the genpd, which needs to be resumed.
1786 *
1787 * This routine should typically be called for a device that needs to be resumed
1788 * during the syscore resume phase. It may also be called during suspend-to-idle
1789 * to resume a corresponding CPU device that is attached to a genpd.
1790 */
1791void dev_pm_genpd_resume(struct device *dev)
1792{
1793 genpd_switch_state(dev, false);
1794}
1795EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
1796
1797#else /* !CONFIG_PM_SLEEP */
1798
1799#define genpd_prepare NULL
1800#define genpd_suspend_noirq NULL
1801#define genpd_resume_noirq NULL
1802#define genpd_freeze_noirq NULL
1803#define genpd_thaw_noirq NULL
1804#define genpd_poweroff_noirq NULL
1805#define genpd_restore_noirq NULL
1806#define genpd_complete NULL
1807
1808#endif /* CONFIG_PM_SLEEP */
1809
1810static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1811 bool has_governor)
1812{
1813 struct generic_pm_domain_data *gpd_data;
1814 struct gpd_timing_data *td;
1815 int ret;
1816
1817 ret = dev_pm_get_subsys_data(dev);
1818 if (ret)
1819 return ERR_PTR(ret);
1820
1821 gpd_data = kzalloc_obj(*gpd_data);
1822 if (!gpd_data) {
1823 ret = -ENOMEM;
1824 goto err_put;
1825 }
1826
1827 gpd_data->base.dev = dev;
1828 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1829
1830 /* Allocate data used by a governor. */
1831 if (has_governor) {
1832 td = kzalloc_obj(*td);
1833 if (!td) {
1834 ret = -ENOMEM;
1835 goto err_free;
1836 }
1837
1838 td->constraint_changed = true;
1839 td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1840 td->next_wakeup = KTIME_MAX;
1841 gpd_data->td = td;
1842 }
1843
1844 spin_lock_irq(&dev->power.lock);
1845
1846 if (dev->power.subsys_data->domain_data)
1847 ret = -EINVAL;
1848 else
1849 dev->power.subsys_data->domain_data = &gpd_data->base;
1850
1851 spin_unlock_irq(&dev->power.lock);
1852
1853 if (ret)
1854 goto err_free;
1855
1856 return gpd_data;
1857
1858 err_free:
1859 kfree(gpd_data->td);
1860 kfree(gpd_data);
1861 err_put:
1862 dev_pm_put_subsys_data(dev);
1863 return ERR_PTR(ret);
1864}
1865
1866static void genpd_free_dev_data(struct device *dev,
1867 struct generic_pm_domain_data *gpd_data)
1868{
1869 spin_lock_irq(&dev->power.lock);
1870
1871 dev->power.subsys_data->domain_data = NULL;
1872
1873 spin_unlock_irq(&dev->power.lock);
1874
1875 dev_pm_opp_clear_config(gpd_data->opp_token);
1876 kfree(gpd_data->td);
1877 kfree(gpd_data);
1878 dev_pm_put_subsys_data(dev);
1879}
1880
1881static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1882 int cpu, bool set, unsigned int depth)
1883{
1884 struct gpd_link *link;
1885
1886 if (!genpd_is_cpu_domain(genpd))
1887 return;
1888
1889 list_for_each_entry(link, &genpd->child_links, child_node) {
1890 struct generic_pm_domain *parent = link->parent;
1891
1892 genpd_lock_nested(parent, depth + 1);
1893 genpd_update_cpumask(parent, cpu, set, depth + 1);
1894 genpd_unlock(parent);
1895 }
1896
1897 if (set)
1898 cpumask_set_cpu(cpu, genpd->cpus);
1899 else
1900 cpumask_clear_cpu(cpu, genpd->cpus);
1901}
1902
1903static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1904{
1905 if (cpu >= 0)
1906 genpd_update_cpumask(genpd, cpu, true, 0);
1907}
1908
1909static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1910{
1911 if (cpu >= 0)
1912 genpd_update_cpumask(genpd, cpu, false, 0);
1913}
1914
1915static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1916{
1917 int cpu;
1918
1919 if (!genpd_is_cpu_domain(genpd))
1920 return -1;
1921
1922 for_each_possible_cpu(cpu) {
1923 if (get_cpu_device(cpu) == dev)
1924 return cpu;
1925 }
1926
1927 return -1;
1928}
1929
1930static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1931 struct device *base_dev)
1932{
1933 struct genpd_governor_data *gd = genpd->gd;
1934 struct generic_pm_domain_data *gpd_data;
1935 int ret;
1936
1937 dev_dbg(dev, "%s()\n", __func__);
1938
1939 gpd_data = genpd_alloc_dev_data(dev, gd);
1940 if (IS_ERR(gpd_data))
1941 return PTR_ERR(gpd_data);
1942
1943 gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1944
1945 gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
1946
1947 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1948 if (ret)
1949 goto out;
1950
1951 genpd_lock(genpd);
1952
1953 genpd_set_cpumask(genpd, gpd_data->cpu);
1954
1955 genpd->device_count++;
1956 if (gd)
1957 gd->max_off_time_changed = true;
1958
1959 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1960
1961 genpd_unlock(genpd);
1962 dev_pm_domain_set(dev, &genpd->domain);
1963 out:
1964 if (ret)
1965 genpd_free_dev_data(dev, gpd_data);
1966 else
1967 dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1968 DEV_PM_QOS_RESUME_LATENCY);
1969
1970 return ret;
1971}
1972
1973/**
1974 * pm_genpd_add_device - Add a device to an I/O PM domain.
1975 * @genpd: PM domain to add the device to.
1976 * @dev: Device to be added.
1977 */
1978int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1979{
1980 int ret;
1981
1982 if (!genpd || !dev)
1983 return -EINVAL;
1984
1985 mutex_lock(&gpd_list_lock);
1986 ret = genpd_add_device(genpd, dev, dev);
1987 mutex_unlock(&gpd_list_lock);
1988
1989 return ret;
1990}
1991EXPORT_SYMBOL_GPL(pm_genpd_add_device);
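
/*
 * Example (sketch, hypothetical "foo" names): once a domain has been set up
 * with pm_genpd_init(), a platform device can be hooked up to it directly:
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		dev_warn(&pdev->dev, "failed to add to PM domain: %d\n", ret);
 */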
1992
1993static int genpd_remove_device(struct generic_pm_domain *genpd,
1994 struct device *dev)
1995{
1996 struct generic_pm_domain_data *gpd_data;
1997 struct pm_domain_data *pdd;
1998 int ret = 0;
1999
2000 dev_dbg(dev, "%s()\n", __func__);
2001
2002 pdd = dev->power.subsys_data->domain_data;
2003 gpd_data = to_gpd_data(pdd);
2004 dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
2005 DEV_PM_QOS_RESUME_LATENCY);
2006
2007 genpd_lock(genpd);
2008
2009 if (genpd->prepared_count > 0) {
2010 ret = -EAGAIN;
2011 goto out;
2012 }
2013
2014 genpd->device_count--;
2015 if (genpd->gd)
2016 genpd->gd->max_off_time_changed = true;
2017
2018 genpd_clear_cpumask(genpd, gpd_data->cpu);
2019
2020 list_del_init(&pdd->list_node);
2021
2022 genpd_unlock(genpd);
2023
2024 dev_pm_domain_set(dev, NULL);
2025
2026 if (genpd->detach_dev)
2027 genpd->detach_dev(genpd, dev);
2028
2029 genpd_free_dev_data(dev, gpd_data);
2030
2031 return 0;
2032
2033 out:
2034 genpd_unlock(genpd);
2035 dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
2036
2037 return ret;
2038}
2039
2040/**
2041 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
2042 * @dev: Device to be removed.
2043 */
2044int pm_genpd_remove_device(struct device *dev)
2045{
2046 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
2047
2048 if (!genpd)
2049 return -EINVAL;
2050
2051 return genpd_remove_device(genpd, dev);
2052}
2053EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2054
2055/**
2056 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2057 *
2058 * @dev: Device that should be associated with the notifier
2059 * @nb: The notifier block to register
2060 *
2061 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * invoked when genpd powers the PM domain on or off.
2064 *
 * It is assumed that the caller guarantees that the genpd won't be detached
 * while this routine is being called.
2067 *
2068 * Returns 0 on success and negative error values on failures.
2069 */
2070int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2071{
2072 struct generic_pm_domain *genpd;
2073 struct generic_pm_domain_data *gpd_data;
2074 int ret;
2075
2076 genpd = dev_to_genpd_safe(dev);
2077 if (!genpd)
2078 return -ENODEV;
2079
2080 if (WARN_ON(!dev->power.subsys_data ||
2081 !dev->power.subsys_data->domain_data))
2082 return -EINVAL;
2083
2084 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2085 if (gpd_data->power_nb)
2086 return -EEXIST;
2087
2088 genpd_lock(genpd);
2089 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2090 genpd_unlock(genpd);
2091
2092 if (ret) {
2093 dev_warn(dev, "failed to add notifier for PM domain %s\n",
2094 dev_name(&genpd->dev));
2095 return ret;
2096 }
2097
2098 gpd_data->power_nb = nb;
2099 return 0;
2100}
2101EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
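
/*
 * Example (sketch, hypothetical "foo" driver): a notifier that saves context
 * before the PM domain is powered off and restores it after power on.
 *
 *	static int foo_pd_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct foo *foo = container_of(nb, struct foo, pd_nb);
 *
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			foo_save_context(foo);
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			foo_restore_context(foo);
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->pd_nb.notifier_call = foo_pd_notifier;
 *	ret = dev_pm_genpd_add_notifier(dev, &foo->pd_nb);
 *
 * The notifier is dropped again with dev_pm_genpd_remove_notifier(dev).
 */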
2102
2103/**
2104 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2105 *
2106 * @dev: Device that is associated with the notifier
2107 *
2108 * Users may call this function to remove a genpd power on/off notifier for an
2109 * attached @dev.
2110 *
 * It is assumed that the caller guarantees that the genpd won't be detached
 * while this routine is being called.
2113 *
2114 * Returns 0 on success and negative error values on failures.
2115 */
2116int dev_pm_genpd_remove_notifier(struct device *dev)
2117{
2118 struct generic_pm_domain *genpd;
2119 struct generic_pm_domain_data *gpd_data;
2120 int ret;
2121
2122 genpd = dev_to_genpd_safe(dev);
2123 if (!genpd)
2124 return -ENODEV;
2125
2126 if (WARN_ON(!dev->power.subsys_data ||
2127 !dev->power.subsys_data->domain_data))
2128 return -EINVAL;
2129
2130 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2131 if (!gpd_data->power_nb)
2132 return -ENODEV;
2133
2134 genpd_lock(genpd);
2135 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2136 gpd_data->power_nb);
2137 genpd_unlock(genpd);
2138
2139 if (ret) {
2140 dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2141 dev_name(&genpd->dev));
2142 return ret;
2143 }
2144
2145 gpd_data->power_nb = NULL;
2146 return 0;
2147}
2148EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2149
2150static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2151 struct generic_pm_domain *subdomain)
2152{
2153 struct gpd_link *link, *itr;
2154 int ret = 0;
2155
2156 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
2157 || genpd == subdomain)
2158 return -EINVAL;
2159
2160 /*
2161 * If the domain can be powered on/off in an IRQ safe
2162 * context, ensure that the subdomain can also be
2163 * powered on/off in that context.
2164 */
2165 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2166 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2167 dev_name(&genpd->dev), subdomain->name);
2168 return -EINVAL;
2169 }
2170
2171 link = kzalloc_obj(*link);
2172 if (!link)
2173 return -ENOMEM;
2174
2175 genpd_lock(subdomain);
2176 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2177
2178 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2179 ret = -EINVAL;
2180 goto out;
2181 }
2182
2183 list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2184 if (itr->child == subdomain && itr->parent == genpd) {
2185 ret = -EINVAL;
2186 goto out;
2187 }
2188 }
2189
2190 link->parent = genpd;
2191 list_add_tail(&link->parent_node, &genpd->parent_links);
2192 link->child = subdomain;
2193 list_add_tail(&link->child_node, &subdomain->child_links);
2194 if (genpd_status_on(subdomain))
2195 genpd_sd_counter_inc(genpd);
2196
2197 out:
2198 genpd_unlock(genpd);
2199 genpd_unlock(subdomain);
2200 if (ret)
2201 kfree(link);
2202 return ret;
2203}
2204
2205/**
2206 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2207 * @genpd: Leader PM domain to add the subdomain to.
2208 * @subdomain: Subdomain to be added.
2209 */
2210int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2211 struct generic_pm_domain *subdomain)
2212{
2213 int ret;
2214
2215 mutex_lock(&gpd_list_lock);
2216 ret = genpd_add_subdomain(genpd, subdomain);
2217 mutex_unlock(&gpd_list_lock);
2218
2219 return ret;
2220}
2221EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
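
/*
 * Example (sketch): building a two-level hierarchy out of two hypothetical
 * "foo" domains, so that the parent can only be powered off once the child
 * domain is off as well:
 *
 *	ret = pm_genpd_init(&foo_parent_pd, NULL, true);
 *	if (!ret)
 *		ret = pm_genpd_init(&foo_child_pd, NULL, true);
 *	if (!ret)
 *		ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 */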
2222
2223/**
2224 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2225 * @genpd: Leader PM domain to remove the subdomain from.
2226 * @subdomain: Subdomain to be removed.
2227 */
2228int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2229 struct generic_pm_domain *subdomain)
2230{
2231 struct gpd_link *l, *link;
2232 int ret = -EINVAL;
2233
2234 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2235 return -EINVAL;
2236
2237 genpd_lock(subdomain);
2238 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2239
2240 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2241 pr_warn("%s: unable to remove subdomain %s\n",
2242 dev_name(&genpd->dev), subdomain->name);
2243 ret = -EBUSY;
2244 goto out;
2245 }
2246
2247 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2248 if (link->child != subdomain)
2249 continue;
2250
2251 list_del(&link->parent_node);
2252 list_del(&link->child_node);
2253 kfree(link);
2254 if (genpd_status_on(subdomain))
2255 genpd_sd_counter_dec(genpd);
2256
2257 ret = 0;
2258 break;
2259 }
2260
2261out:
2262 genpd_unlock(genpd);
2263 genpd_unlock(subdomain);
2264
2265 return ret;
2266}
2267EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2268
2269static void genpd_free_default_power_state(struct genpd_power_state *states,
2270 unsigned int state_count)
2271{
2272 kfree(states);
2273}
2274
2275static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2276{
2277 struct genpd_power_state *state;
2278
2279 state = kzalloc_obj(*state);
2280 if (!state)
2281 return -ENOMEM;
2282
2283 genpd->states = state;
2284 genpd->state_count = 1;
2285 genpd->free_states = genpd_free_default_power_state;
2286
2287 return 0;
2288}
2289
2290static void genpd_provider_release(struct device *dev)
2291{
2292 /* nothing to be done here */
2293}
2294
2295static int genpd_alloc_data(struct generic_pm_domain *genpd)
2296{
2297 struct genpd_governor_data *gd = NULL;
2298 int ret;
2299
2300 if (genpd_is_cpu_domain(genpd) &&
2301 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2302 return -ENOMEM;
2303
2304 if (genpd->gov) {
2305 gd = kzalloc_obj(*gd);
2306 if (!gd) {
2307 ret = -ENOMEM;
2308 goto free;
2309 }
2310
2311 gd->max_off_time_ns = -1;
2312 gd->max_off_time_changed = true;
2313 gd->next_wakeup = KTIME_MAX;
2314 gd->next_hrtimer = KTIME_MAX;
2315 }
2316
2317 /* Use only one "off" state if there were no states declared */
2318 if (genpd->state_count == 0) {
2319 ret = genpd_set_default_power_state(genpd);
2320 if (ret)
2321 goto free;
2322 }
2323
2324 genpd->gd = gd;
2325 device_initialize(&genpd->dev);
2326 genpd->dev.release = genpd_provider_release;
2327 genpd->dev.bus = &genpd_provider_bus_type;
2328 genpd->dev.parent = &genpd_provider_bus;
2329
2330 if (!genpd_is_dev_name_fw(genpd)) {
2331 dev_set_name(&genpd->dev, "%s", genpd->name);
2332 } else {
2333 ret = ida_alloc(&genpd_ida, GFP_KERNEL);
2334 if (ret < 0)
2335 goto put;
2336
2337 genpd->device_id = ret;
2338 dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
2339 }
2340
2341 return 0;
2342put:
2343 put_device(&genpd->dev);
2344 if (genpd->free_states == genpd_free_default_power_state) {
2345 kfree(genpd->states);
2346 genpd->states = NULL;
2347 }
2348free:
2349 if (genpd_is_cpu_domain(genpd))
2350 free_cpumask_var(genpd->cpus);
2351 kfree(gd);
2352 return ret;
2353}
2354
2355static void genpd_free_data(struct generic_pm_domain *genpd)
2356{
2357 put_device(&genpd->dev);
2358 if (genpd->device_id != -ENXIO)
2359 ida_free(&genpd_ida, genpd->device_id);
2360 if (genpd_is_cpu_domain(genpd))
2361 free_cpumask_var(genpd->cpus);
2362 if (genpd->free_states)
2363 genpd->free_states(genpd->states, genpd->state_count);
2364 kfree(genpd->gd);
2365}
2366
2367static void genpd_lock_init(struct generic_pm_domain *genpd)
2368{
2369 if (genpd_is_cpu_domain(genpd)) {
2370 raw_spin_lock_init(&genpd->raw_slock);
2371 genpd->lock_ops = &genpd_raw_spin_ops;
2372 } else if (genpd_is_irq_safe(genpd)) {
2373 spin_lock_init(&genpd->slock);
2374 genpd->lock_ops = &genpd_spin_ops;
2375 } else {
2376 mutex_init(&genpd->mlock);
2377 genpd->lock_ops = &genpd_mtx_ops;
2378 }
2379}
2380
2381#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2382static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
2383{
2384 genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off;
2385}
2386#else
2387static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
2388{
2389 genpd->stay_on = false;
2390}
2391#endif
2392
2393/**
2394 * pm_genpd_init - Initialize a generic I/O PM domain object.
2395 * @genpd: PM domain object to initialize.
2396 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain should be initialized as powered off.
2398 *
2399 * Returns 0 on successful initialization, else a negative error code.
2400 */
2401int pm_genpd_init(struct generic_pm_domain *genpd,
2402 struct dev_power_governor *gov, bool is_off)
2403{
2404 int ret;
2405
2406 if (IS_ERR_OR_NULL(genpd))
2407 return -EINVAL;
2408
2409 INIT_LIST_HEAD(&genpd->parent_links);
2410 INIT_LIST_HEAD(&genpd->child_links);
2411 INIT_LIST_HEAD(&genpd->dev_list);
2412 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2413 genpd_lock_init(genpd);
2414 genpd->gov = gov;
2415 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2416 atomic_set(&genpd->sd_count, 0);
2417 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2418 genpd_set_stay_on(genpd, is_off);
2419 genpd->sync_state = GENPD_SYNC_STATE_OFF;
2420 genpd->device_count = 0;
2421 genpd->provider = NULL;
2422 genpd->device_id = -ENXIO;
2423 genpd->has_provider = false;
2424 genpd->opp_table = NULL;
2425 genpd->accounting_time = ktime_get_mono_fast_ns();
2426 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2427 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2428 genpd->domain.ops.prepare = genpd_prepare;
2429 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2430 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2431 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2432 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2433 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2434 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2435 genpd->domain.ops.complete = genpd_complete;
2436 genpd->domain.start = genpd_dev_pm_start;
2437 genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2438
2439 if (genpd->flags & GENPD_FLAG_PM_CLK) {
2440 genpd->dev_ops.stop = pm_clk_suspend;
2441 genpd->dev_ops.start = pm_clk_resume;
2442 }
2443
2444 /* The always-on governor works better with the corresponding flag. */
2445 if (gov == &pm_domain_always_on_gov)
2446 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2447
2448 /* Always-on domains must be powered on at initialization. */
2449 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2450 !genpd_status_on(genpd)) {
2451 pr_err("always-on PM domain %s is not on\n", genpd->name);
2452 return -EINVAL;
2453 }
2454
2455 /* Multiple states but no governor doesn't make sense. */
2456 if (!gov && genpd->state_count > 1)
2457 pr_warn("%s: no governor for states\n", genpd->name);
2458
2459 ret = genpd_alloc_data(genpd);
2460 if (ret)
2461 return ret;
2462
2463 mutex_lock(&gpd_list_lock);
2464 list_add(&genpd->gpd_list_node, &gpd_list);
2465 mutex_unlock(&gpd_list_lock);
2466 genpd_debug_add(genpd);
2467
2468 return 0;
2469}
2470EXPORT_SYMBOL_GPL(pm_genpd_init);
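
/*
 * Example (illustrative sketch): a minimal domain with power on/off callbacks,
 * initially powered off. The "foo" identifiers, including the hardware
 * helpers, are hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return foo_hw_enable();
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return foo_hw_disable();
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */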
2471
2472static int genpd_remove(struct generic_pm_domain *genpd)
2473{
2474 struct gpd_link *l, *link;
2475
2476 if (IS_ERR_OR_NULL(genpd))
2477 return -EINVAL;
2478
2479 genpd_lock(genpd);
2480
2481 if (genpd->has_provider) {
2482 genpd_unlock(genpd);
2483 pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2484 return -EBUSY;
2485 }
2486
2487 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2488 genpd_unlock(genpd);
2489 pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2490 return -EBUSY;
2491 }
2492
2493 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2494 list_del(&link->parent_node);
2495 list_del(&link->child_node);
2496 kfree(link);
2497 }
2498
2499 list_del(&genpd->gpd_list_node);
2500 genpd_unlock(genpd);
2501 genpd_debug_remove(genpd);
2502 cancel_work_sync(&genpd->power_off_work);
2503 genpd_free_data(genpd);
2504
2505 pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2506
2507 return 0;
2508}
2509
2510/**
2511 * pm_genpd_remove - Remove a generic I/O PM domain
2512 * @genpd: Pointer to PM domain that is to be removed.
2513 *
2514 * To remove the PM domain, this function:
2515 * - Removes the PM domain as a subdomain to any parent domains,
2516 * if it was added.
2517 * - Removes the PM domain from the list of registered PM domains.
2518 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain, and it has no
 * devices associated with it.
2522 */
2523int pm_genpd_remove(struct generic_pm_domain *genpd)
2524{
2525 int ret;
2526
2527 mutex_lock(&gpd_list_lock);
2528 ret = genpd_remove(genpd);
2529 mutex_unlock(&gpd_list_lock);
2530
2531 return ret;
2532}
2533EXPORT_SYMBOL_GPL(pm_genpd_remove);
2534
2535#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2536
2537/*
2538 * Device Tree based PM domain providers.
2539 *
2540 * The code below implements generic device tree based PM domain providers that
2541 * bind device tree nodes with generic PM domains registered in the system.
2542 *
2543 * Any driver that registers generic PM domains and needs to support binding of
2544 * devices to these domains is supposed to register a PM domain provider, which
2545 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2546 *
2547 * Two simple mapping functions have been provided for convenience:
2548 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2549 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2550 * index.
2551 */
2552
2553/**
2554 * struct of_genpd_provider - PM domain provider registration structure
2555 * @link: Entry in global list of PM domain providers
2556 * @node: Pointer to device tree node of PM domain provider
2557 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2558 * into a PM domain.
2559 * @data: context pointer to be passed into @xlate callback
2560 */
2561struct of_genpd_provider {
2562 struct list_head link;
2563 struct device_node *node;
2564 genpd_xlate_t xlate;
2565 void *data;
2566};
2567
2568/* List of registered PM domain providers. */
2569static LIST_HEAD(of_genpd_providers);
2570/* Mutex to protect the list above. */
2571static DEFINE_MUTEX(of_genpd_mutex);
2572/* Used to prevent registering devices before the bus. */
2573static bool genpd_bus_registered;
2574
2575/**
2576 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2577 * @genpdspec: OF phandle args to map into a PM domain
2578 * @data: xlate function private data - pointer to struct generic_pm_domain
2579 *
2580 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to a struct generic_pm_domain.
2583 */
2584static struct generic_pm_domain *genpd_xlate_simple(
2585 const struct of_phandle_args *genpdspec,
2586 void *data)
2587{
2588 return data;
2589}
2590
2591/**
2592 * genpd_xlate_onecell() - Xlate function using a single index.
2593 * @genpdspec: OF phandle args to map into a PM domain
2594 * @data: xlate function private data - pointer to struct genpd_onecell_data
2595 *
2596 * This is a generic xlate function that can be used to model simple PM domain
2597 * controllers that have one device tree node and provide multiple PM domains.
2598 * A single cell is used as an index into an array of PM domains specified in
2599 * the genpd_onecell_data struct when registering the provider.
2600 */
2601static struct generic_pm_domain *genpd_xlate_onecell(
2602 const struct of_phandle_args *genpdspec,
2603 void *data)
2604{
2605 struct genpd_onecell_data *genpd_data = data;
2606 unsigned int idx = genpdspec->args[0];
2607
2608 if (genpdspec->args_count != 1)
2609 return ERR_PTR(-EINVAL);
2610
2611 if (idx >= genpd_data->num_domains) {
2612 pr_err("%s: invalid domain index %u\n", __func__, idx);
2613 return ERR_PTR(-EINVAL);
2614 }
2615
2616 if (!genpd_data->domains[idx])
2617 return ERR_PTR(-ENOENT);
2618
2619 return genpd_data->domains[idx];
2620}
2621
2622/**
2623 * genpd_add_provider() - Register a PM domain provider for a node
2624 * @np: Device node pointer associated with the PM domain provider.
2625 * @xlate: Callback for decoding PM domain from phandle arguments.
2626 * @data: Context pointer for @xlate callback.
2627 */
2628static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2629 void *data)
2630{
2631 struct of_genpd_provider *cp;
2632
2633 cp = kzalloc_obj(*cp);
2634 if (!cp)
2635 return -ENOMEM;
2636
2637 cp->node = of_node_get(np);
2638 cp->data = data;
2639 cp->xlate = xlate;
2640 fwnode_dev_initialized(of_fwnode_handle(np), true);
2641
2642 mutex_lock(&of_genpd_mutex);
2643 list_add(&cp->link, &of_genpd_providers);
2644 mutex_unlock(&of_genpd_mutex);
2645 pr_debug("Added domain provider from %pOF\n", np);
2646
2647 return 0;
2648}
2649
2650static bool genpd_present(const struct generic_pm_domain *genpd)
2651{
2652 bool ret = false;
2653 const struct generic_pm_domain *gpd;
2654
2655 mutex_lock(&gpd_list_lock);
2656 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2657 if (gpd == genpd) {
2658 ret = true;
2659 break;
2660 }
2661 }
2662 mutex_unlock(&gpd_list_lock);
2663
2664 return ret;
2665}
2666
2667static void genpd_sync_state(struct device *dev)
2668{
2669 return of_genpd_sync_state(dev->of_node);
2670}
2671
2672/**
2673 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2674 * @np: Device node pointer associated with the PM domain provider.
2675 * @genpd: Pointer to PM domain associated with the PM domain provider.
2676 */
2677int of_genpd_add_provider_simple(struct device_node *np,
2678 struct generic_pm_domain *genpd)
2679{
2680 struct fwnode_handle *fwnode;
2681 struct device *dev;
2682 int ret;
2683
2684 if (!np || !genpd)
2685 return -EINVAL;
2686
2687 if (!genpd_bus_registered)
2688 return -ENODEV;
2689
2690 if (!genpd_present(genpd))
2691 return -EINVAL;
2692
2693 genpd->dev.of_node = np;
2694
2695 fwnode = of_fwnode_handle(np);
2696 dev = get_dev_from_fwnode(fwnode);
2697 if (!dev && !genpd_is_no_sync_state(genpd)) {
2698 genpd->sync_state = GENPD_SYNC_STATE_SIMPLE;
2699 device_set_node(&genpd->dev, fwnode);
2700 } else {
2701 dev_set_drv_sync_state(dev, genpd_sync_state);
2702 }
2703
2704 put_device(dev);
2705
2706 ret = device_add(&genpd->dev);
2707 if (ret)
2708 return ret;
2709
2710 /* Parse genpd OPP table */
2711 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2712 ret = dev_pm_opp_of_add_table(&genpd->dev);
2713 if (ret) {
2714 dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2715 goto err_del;
2716 }
2717
2718 /*
2719 * Save table for faster processing while setting performance
2720 * state.
2721 */
2722 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2723 WARN_ON(IS_ERR(genpd->opp_table));
2724 }
2725
2726 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2727 if (ret)
2728 goto err_opp;
2729
2730 genpd->provider = fwnode;
2731 genpd->has_provider = true;
2732
2733 return 0;
2734
2735err_opp:
2736 if (genpd->opp_table) {
2737 dev_pm_opp_put_opp_table(genpd->opp_table);
2738 dev_pm_opp_of_remove_table(&genpd->dev);
2739 }
2740err_del:
2741 device_del(&genpd->dev);
2742 return ret;
2743}
2744EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
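
/*
 * Example (sketch): registering the hypothetical "foo" domain from the
 * pm_genpd_init() example above as the provider for a DT node along the
 * lines of:
 *
 *	power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	ret = of_genpd_add_provider_simple(np, &foo_pd);
 */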
2745
2746/**
2747 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2748 * @np: Device node pointer associated with the PM domain provider.
2749 * @data: Pointer to the data associated with the PM domain provider.
2750 */
2751int of_genpd_add_provider_onecell(struct device_node *np,
2752 struct genpd_onecell_data *data)
2753{
2754 struct generic_pm_domain *genpd;
2755 struct fwnode_handle *fwnode;
2756 struct device *dev;
2757 unsigned int i;
2758 int ret = -EINVAL;
2759 bool sync_state = false;
2760
2761 if (!np || !data)
2762 return -EINVAL;
2763
2764 if (!genpd_bus_registered)
2765 return -ENODEV;
2766
2767 if (!data->xlate)
2768 data->xlate = genpd_xlate_onecell;
2769
2770 fwnode = of_fwnode_handle(np);
2771 dev = get_dev_from_fwnode(fwnode);
2772 if (!dev)
2773 sync_state = true;
2774 else
2775 dev_set_drv_sync_state(dev, genpd_sync_state);
2776
2777 put_device(dev);
2778
2779 for (i = 0; i < data->num_domains; i++) {
2780 genpd = data->domains[i];
2781
2782 if (!genpd)
2783 continue;
2784 if (!genpd_present(genpd))
2785 goto error;
2786
2787 genpd->dev.of_node = np;
2788
2789 if (sync_state && !genpd_is_no_sync_state(genpd)) {
2790 genpd->sync_state = GENPD_SYNC_STATE_ONECELL;
2791 device_set_node(&genpd->dev, fwnode);
2792 sync_state = false;
2793 }
2794
2795 ret = device_add(&genpd->dev);
2796 if (ret)
2797 goto error;
2798
2799 /* Parse genpd OPP table */
2800 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2801 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2802 if (ret) {
2803 dev_err_probe(&genpd->dev, ret,
2804 "Failed to add OPP table for index %d\n", i);
2805 device_del(&genpd->dev);
2806 goto error;
2807 }
2808
2809 /*
2810 * Save table for faster processing while setting
2811 * performance state.
2812 */
2813 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2814 WARN_ON(IS_ERR(genpd->opp_table));
2815 }
2816
2817 genpd->provider = fwnode;
2818 genpd->has_provider = true;
2819 }
2820
2821 ret = genpd_add_provider(np, data->xlate, data);
2822 if (ret < 0)
2823 goto error;
2824
2825 return 0;
2826
2827error:
2828 while (i--) {
2829 genpd = data->domains[i];
2830
2831 if (!genpd)
2832 continue;
2833
2834 genpd->provider = NULL;
2835 genpd->has_provider = false;
2836
2837 if (genpd->opp_table) {
2838 dev_pm_opp_put_opp_table(genpd->opp_table);
2839 dev_pm_opp_of_remove_table(&genpd->dev);
2840 }
2841
2842 device_del(&genpd->dev);
2843 }
2844
2845 return ret;
2846}
2847EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
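
/*
 * Example (sketch): a controller exposing two hypothetical domains through a
 * single node with "#power-domain-cells = <1>", letting consumers reference
 * them by index:
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a,
 *		&foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 */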
2848
2849/**
2850 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2851 * @np: Device node pointer associated with the PM domain provider
2852 */
2853void of_genpd_del_provider(struct device_node *np)
2854{
2855 struct of_genpd_provider *cp, *tmp;
2856 struct generic_pm_domain *gpd;
2857
2858 mutex_lock(&gpd_list_lock);
2859 mutex_lock(&of_genpd_mutex);
2860 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2861 if (cp->node == np) {
2862 /*
2863 * For each PM domain associated with the
2864 * provider, set the 'has_provider' to false
2865 * so that the PM domain can be safely removed.
2866 */
2867 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2868 if (gpd->provider == of_fwnode_handle(np)) {
2869 gpd->has_provider = false;
2870
2871 if (gpd->opp_table) {
2872 dev_pm_opp_put_opp_table(gpd->opp_table);
2873 dev_pm_opp_of_remove_table(&gpd->dev);
2874 }
2875
2876 device_del(&gpd->dev);
2877 }
2878 }
2879
2880 fwnode_dev_initialized(of_fwnode_handle(cp->node), false);
2881 list_del(&cp->link);
2882 of_node_put(cp->node);
2883 kfree(cp);
2884 break;
2885 }
2886 }
2887 mutex_unlock(&of_genpd_mutex);
2888 mutex_unlock(&gpd_list_lock);
2889}
2890EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2891
2892/**
2893 * genpd_get_from_provider() - Look-up PM domain
2894 * @genpdspec: OF phandle args to use for look-up
2895 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if found, uses the provider's xlate function to map the phandle args to a
 * PM domain.
2899 *
2900 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2901 * on failure.
2902 */
2903static struct generic_pm_domain *genpd_get_from_provider(
2904 const struct of_phandle_args *genpdspec)
2905{
2906 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2907 struct of_genpd_provider *provider;
2908
2909 if (!genpdspec)
2910 return ERR_PTR(-EINVAL);
2911
2912 mutex_lock(&of_genpd_mutex);
2913
2914 /* Check if we have such a provider in our array */
2915 list_for_each_entry(provider, &of_genpd_providers, link) {
2916 if (provider->node == genpdspec->np)
2917 genpd = provider->xlate(genpdspec, provider->data);
2918 if (!IS_ERR(genpd))
2919 break;
2920 }
2921
2922 mutex_unlock(&of_genpd_mutex);
2923
2924 return genpd;
2925}
2926
2927/**
2928 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for the PM domain look-up
2930 * @dev: Device to be added.
2931 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
2933 * the device to the PM domain. Returns a negative error code on failure.
2934 */
2935int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2936{
2937 struct generic_pm_domain *genpd;
2938 int ret;
2939
2940 if (!dev)
2941 return -EINVAL;
2942
2943 mutex_lock(&gpd_list_lock);
2944
2945 genpd = genpd_get_from_provider(genpdspec);
2946 if (IS_ERR(genpd)) {
2947 ret = PTR_ERR(genpd);
2948 goto out;
2949 }
2950
2951 ret = genpd_add_device(genpd, dev, dev);
2952
2953out:
2954 mutex_unlock(&gpd_list_lock);
2955
2956 return ret;
2957}
2958EXPORT_SYMBOL_GPL(of_genpd_add_device);
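
/*
 * Example (sketch): resolving the first "power-domains" specifier of a
 * consumer node by hand and adding @dev to the resulting domain:
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}
 */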
2959
2960/**
2961 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2962 * @parent_spec: OF phandle args to use for parent PM domain look-up
2963 * @subdomain_spec: OF phandle args to use for subdomain look-up
2964 *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
2966 * provided and adds the subdomain to the parent PM domain. Returns a
2967 * negative error code on failure.
2968 */
2969int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2970 const struct of_phandle_args *subdomain_spec)
2971{
2972 struct generic_pm_domain *parent, *subdomain;
2973 int ret;
2974
2975 mutex_lock(&gpd_list_lock);
2976
2977 parent = genpd_get_from_provider(parent_spec);
2978 if (IS_ERR(parent)) {
2979 ret = PTR_ERR(parent);
2980 goto out;
2981 }
2982
2983 subdomain = genpd_get_from_provider(subdomain_spec);
2984 if (IS_ERR(subdomain)) {
2985 ret = PTR_ERR(subdomain);
2986 goto out;
2987 }
2988
2989 ret = genpd_add_subdomain(parent, subdomain);
2990
2991out:
2992 mutex_unlock(&gpd_list_lock);
2993
2994 return ret == -ENOENT ? -EPROBE_DEFER : ret;
2995}
2996EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2997
2998/**
2999 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
3000 * @parent_spec: OF phandle args to use for parent PM domain look-up
3001 * @subdomain_spec: OF phandle args to use for subdomain look-up
3002 *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
3004 * provided and removes the subdomain from the parent PM domain. Returns a
3005 * negative error code on failure.
3006 */
3007int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
3008 const struct of_phandle_args *subdomain_spec)
3009{
3010 struct generic_pm_domain *parent, *subdomain;
3011 int ret;
3012
3013 mutex_lock(&gpd_list_lock);
3014
3015 parent = genpd_get_from_provider(parent_spec);
3016 if (IS_ERR(parent)) {
3017 ret = PTR_ERR(parent);
3018 goto out;
3019 }
3020
3021 subdomain = genpd_get_from_provider(subdomain_spec);
3022 if (IS_ERR(subdomain)) {
3023 ret = PTR_ERR(subdomain);
3024 goto out;
3025 }
3026
3027 ret = pm_genpd_remove_subdomain(parent, subdomain);
3028
3029out:
3030 mutex_unlock(&gpd_list_lock);
3031
3032 return ret;
3033}
3034EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
3035
3036/**
3037 * of_genpd_remove_last - Remove the last PM domain registered for a provider
3038 * @np: Pointer to device node associated with provider
3039 *
3040 * Find the last PM domain that was added by a particular provider and
3041 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node @np that is passed in. The PM domain will
 * only be removed if the provider associated with the domain has been
 * removed.
3045 *
3046 * Returns a valid pointer to struct generic_pm_domain on success or
3047 * ERR_PTR() on failure.
3048 */
3049struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
3050{
3051 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
3052 int ret;
3053
3054 if (IS_ERR_OR_NULL(np))
3055 return ERR_PTR(-EINVAL);
3056
3057 mutex_lock(&gpd_list_lock);
3058 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
3059 if (gpd->provider == of_fwnode_handle(np)) {
3060 ret = genpd_remove(gpd);
3061 genpd = ret ? ERR_PTR(ret) : gpd;
3062 break;
3063 }
3064 }
3065 mutex_unlock(&gpd_list_lock);
3066
3067 return genpd;
3068}
3069EXPORT_SYMBOL_GPL(of_genpd_remove_last);
3070
3071static void genpd_release_dev(struct device *dev)
3072{
3073 of_node_put(dev->of_node);
3074 kfree(dev);
3075}
3076
3077static const struct bus_type genpd_bus_type = {
3078 .name = "genpd",
3079};
3080
3081/**
3082 * genpd_dev_pm_detach - Detach a device from its PM domain.
3083 * @dev: Device to detach.
3084 * @power_off: Currently not used
3085 *
3086 * Try to locate a corresponding generic PM domain, which the device was
3087 * attached to previously. If such is found, the device is detached from it.
3088 */
3089static void genpd_dev_pm_detach(struct device *dev, bool power_off)
3090{
3091 struct generic_pm_domain *pd;
3092 bool is_virt_dev;
3093 unsigned int i;
3094 int ret = 0;
3095
3096 pd = dev_to_genpd(dev);
3097 if (IS_ERR(pd))
3098 return;
3099
3100 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
3101
3102 /* Check if the device was created by genpd at attach. */
3103 is_virt_dev = dev->bus == &genpd_bus_type;
3104
3105 /* Disable runtime PM if we enabled it at attach. */
3106 if (is_virt_dev)
3107 pm_runtime_disable(dev);
3108
3109 /* Drop the default performance state */
3110 if (dev_gpd_data(dev)->default_pstate) {
3111 dev_pm_genpd_set_performance_state(dev, 0);
3112 dev_gpd_data(dev)->default_pstate = 0;
3113 }
3114
3115 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
3116 ret = genpd_remove_device(pd, dev);
3117 if (ret != -EAGAIN)
3118 break;
3119
3120 mdelay(i);
3121 cond_resched();
3122 }
3123
3124 if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
3127 return;
3128 }
3129
3130 /* Check if PM domain can be powered off after removing this device. */
3131 genpd_queue_power_off_work(pd);
3132
3133 /* Unregister the device if it was created by genpd. */
3134 if (is_virt_dev)
3135 device_unregister(dev);
3136}
3137
3138static void genpd_dev_pm_sync(struct device *dev)
3139{
3140 struct generic_pm_domain *pd;
3141
3142 pd = dev_to_genpd(dev);
3143 if (IS_ERR(pd))
3144 return;
3145
3146 genpd_queue_power_off_work(pd);
3147}
3148
3149static int genpd_set_required_opp_dev(struct device *dev,
3150 struct device *base_dev)
3151{
3152 struct dev_pm_opp_config config = {
3153 .required_dev = dev,
3154 };
3155 int ret;
3156
3157 /* Limit support to non-providers for now. */
3158 if (of_property_present(base_dev->of_node, "#power-domain-cells"))
3159 return 0;
3160
3161 if (!dev_pm_opp_of_has_required_opp(base_dev))
3162 return 0;
3163
3164 ret = dev_pm_opp_set_config(base_dev, &config);
3165 if (ret < 0)
3166 return ret;
3167
3168 dev_gpd_data(dev)->opp_token = ret;
3169 return 0;
3170}
3171
3172static int genpd_set_required_opp(struct device *dev, unsigned int index)
3173{
3174 int ret, pstate;
3175
3176 /* Set the default performance state */
3177 pstate = of_get_required_opp_performance_state(dev->of_node, index);
3178 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
3179 ret = pstate;
3180 goto err;
3181 } else if (pstate > 0) {
3182 ret = dev_pm_genpd_set_performance_state(dev, pstate);
3183 if (ret)
3184 goto err;
3185 dev_gpd_data(dev)->default_pstate = pstate;
3186 }
3187
3188 return 0;
3189err:
3190 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
3191 dev_to_genpd(dev)->name, ret);
3192 return ret;
3193}
3194
3195static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
3196 unsigned int index, unsigned int num_domains,
3197 bool power_on)
3198{
3199 struct of_phandle_args pd_args;
3200 struct generic_pm_domain *pd;
3201 int ret;
3202
3203 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
3204 "#power-domain-cells", index, &pd_args);
3205 if (ret < 0)
3206 return ret;
3207
3208 mutex_lock(&gpd_list_lock);
3209 pd = genpd_get_from_provider(&pd_args);
3210 of_node_put(pd_args.np);
3211 if (IS_ERR(pd)) {
3212 mutex_unlock(&gpd_list_lock);
3213 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
3214 __func__, PTR_ERR(pd));
3215 return driver_deferred_probe_check_state(base_dev);
3216 }
3217
3218 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
3219
3220 ret = genpd_add_device(pd, dev, base_dev);
3221 mutex_unlock(&gpd_list_lock);
3222
3223 if (ret < 0)
3224 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
3225
3226 dev->pm_domain->detach = genpd_dev_pm_detach;
3227 dev->pm_domain->sync = genpd_dev_pm_sync;
3228
3229 /*
3230 * For a single PM domain the index of the required OPP must be zero, so
3231 * let's try to assign a required dev in that case. In the multiple PM
3232 * domains case, we need platform code to specify the index.
3233 */
3234 if (num_domains == 1) {
3235 ret = genpd_set_required_opp_dev(dev, base_dev);
3236 if (ret)
3237 goto err;
3238 }
3239
3240 ret = genpd_set_required_opp(dev, index);
3241 if (ret)
3242 goto err;
3243
3244 if (power_on) {
3245 genpd_lock(pd);
3246 ret = genpd_power_on(pd, 0);
3247 genpd_unlock(pd);
3248 }
3249
3250 if (ret) {
3251 /* Drop the default performance state */
3252 if (dev_gpd_data(dev)->default_pstate) {
3253 dev_pm_genpd_set_performance_state(dev, 0);
3254 dev_gpd_data(dev)->default_pstate = 0;
3255 }
3256
3257 genpd_remove_device(pd, dev);
3258 return -EPROBE_DEFER;
3259 }
3260
3261 return 1;
3262
3263err:
3264 genpd_remove_device(pd, dev);
3265 return ret;
3266}
3267
3268/**
3269 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3270 * @dev: Device to attach.
3271 *
3272 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to the retrieved pm_domain ops.
3274 *
 * Returns 1 when a PM domain has been attached successfully, 0 when the device
 * doesn't need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry later.
3280 */
3281int genpd_dev_pm_attach(struct device *dev)
3282{
3283 if (!dev->of_node)
3284 return 0;
3285
3286 /*
3287 * Devices with multiple PM domains must be attached separately, as we
3288 * can only attach one PM domain per device.
3289 */
3290 if (of_count_phandle_with_args(dev->of_node, "power-domains",
3291 "#power-domain-cells") != 1)
3292 return 0;
3293
3294 return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3295}
3296EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3297
3298/**
3299 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to look up the PM domain.
3301 * @index: The index of the PM domain.
3302 *
3303 * Parse device's OF node to find a PM domain specifier at the provided @index.
3304 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device when a PM domain has been attached
 * successfully, NULL when the device doesn't need a PM domain, else an
 * ERR_PTR() in case of failures. If a power-domain exists for the device, but
 * cannot be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to
 * ensure that the device is not probed and to retry later.
3313 */
3314struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3315 unsigned int index)
3316{
3317 struct device *virt_dev;
3318 int num_domains;
3319 int ret;
3320
3321 if (!dev->of_node)
3322 return NULL;
3323
3324 /* Verify that the index is within a valid range. */
3325 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3326 "#power-domain-cells");
3327 if (num_domains < 0 || index >= num_domains)
3328 return NULL;
3329
3330 if (!genpd_bus_registered)
3331 return ERR_PTR(-ENODEV);
3332
3333 /* Allocate and register device on the genpd bus. */
3334 virt_dev = kzalloc_obj(*virt_dev);
3335 if (!virt_dev)
3336 return ERR_PTR(-ENOMEM);
3337
3338 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3339 virt_dev->bus = &genpd_bus_type;
3340 virt_dev->release = genpd_release_dev;
3341 virt_dev->of_node = of_node_get(dev->of_node);
3342
3343 ret = device_register(virt_dev);
3344 if (ret) {
3345 put_device(virt_dev);
3346 return ERR_PTR(ret);
3347 }
3348
3349 /* Try to attach the device to the PM domain at the specified index. */
3350 ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3351 if (ret < 1) {
3352 device_unregister(virt_dev);
3353 return ret ? ERR_PTR(ret) : NULL;
3354 }
3355
3356 pm_runtime_enable(virt_dev);
3357 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3358
3359 return virt_dev;
3360}
3361EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3362
3363/**
3364 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to look up the PM domain.
3366 * @name: The name of the PM domain.
3367 *
3368 * Parse device's OF node to find a PM domain specifier using the
3369 * power-domain-names DT property. For further description see
3370 * genpd_dev_pm_attach_by_id().
3371 */
3372struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3373{
3374 int index;
3375
3376 if (!dev->of_node)
3377 return NULL;
3378
3379 index = of_property_match_string(dev->of_node, "power-domain-names",
3380 name);
3381 if (index < 0)
3382 return NULL;
3383
3384 return genpd_dev_pm_attach_by_id(dev, index);
3385}
3386
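/*
 * Example (sketch): a consumer with multiple PM domains, attached one by one
 * through virtual devices. The DT snippet and "foo" names are hypothetical.
 *
 *	dsp@1000 {
 *		compatible = "foo,dsp";
 *		power-domains = <&pd FOO_CORE>, <&pd FOO_MEM>;
 *		power-domain-names = "core", "mem";
 *	};
 *
 *	foo->core_pd = genpd_dev_pm_attach_by_name(dev, "core");
 *	foo->mem_pd = genpd_dev_pm_attach_by_name(dev, "mem");
 *
 * The returned virtual devices are runtime PM enabled, so the domains are
 * typically controlled via device links or pm_runtime_get/put calls on them.
 */
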
3387static const struct of_device_id idle_state_match[] = {
3388 { .compatible = "domain-idle-state", },
3389 { }
3390};
3391
3392static int genpd_parse_state(struct genpd_power_state *genpd_state,
3393 struct device_node *state_node)
3394{
3395 int err;
3396 u32 residency;
3397 u32 entry_latency, exit_latency;
3398
3399 err = of_property_read_u32(state_node, "entry-latency-us",
3400 &entry_latency);
3401 if (err) {
3402 pr_debug(" * %pOF missing entry-latency-us property\n",
3403 state_node);
3404 return -EINVAL;
3405 }
3406
3407 err = of_property_read_u32(state_node, "exit-latency-us",
3408 &exit_latency);
3409 if (err) {
3410 pr_debug(" * %pOF missing exit-latency-us property\n",
3411 state_node);
3412 return -EINVAL;
3413 }
3414
3415 err = of_property_read_u32(state_node, "min-residency-us", &residency);
3416 if (!err)
3417 genpd_state->residency_ns = 1000LL * residency;
3418
3419 of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3420
3421 genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3422 genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3423 genpd_state->fwnode = of_fwnode_handle(state_node);
3424
3425 return 0;
3426}
3427
3428static int genpd_iterate_idle_states(struct device_node *dn,
3429 struct genpd_power_state *states)
3430{
3431 int ret;
3432 struct of_phandle_iterator it;
3433 struct device_node *np;
3434 int i = 0;
3435
3436 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3437 if (ret <= 0)
3438 return ret == -ENOENT ? 0 : ret;
3439
	/* Loop over the phandles until all the requested entries are found */
3441 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3442 np = it.node;
3443 if (!of_match_node(idle_state_match, np))
3444 continue;
3445
3446 if (!of_device_is_available(np))
3447 continue;
3448
3449 if (states) {
3450 ret = genpd_parse_state(&states[i], np);
3451 if (ret) {
3452 pr_err("Parsing idle state node %pOF failed with err %d\n",
3453 np, ret);
3454 of_node_put(np);
3455 return ret;
3456 }
3457 }
3458 i++;
3459 }
3460
3461 return i;
3462}
3463
3464/**
3465 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3466 *
3467 * @dn: The genpd device node
3468 * @states: The pointer to which the state array will be saved.
3469 * @n: The count of elements in the array returned from this function.
3470 *
3471 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the caller's responsibility to free
 * the memory after use. If zero or more compatible domain idle states are
 * found, it returns 0; in case of errors, a negative error code is returned.
3475 */
3476int of_genpd_parse_idle_states(struct device_node *dn,
3477 struct genpd_power_state **states, int *n)
3478{
3479 struct genpd_power_state *st;
3480 int ret;
3481
3482 ret = genpd_iterate_idle_states(dn, NULL);
3483 if (ret < 0)
3484 return ret;
3485
3486 if (!ret) {
3487 *states = NULL;
3488 *n = 0;
3489 return 0;
3490 }
3491
3492 st = kzalloc_objs(*st, ret);
3493 if (!st)
3494 return -ENOMEM;
3495
3496 ret = genpd_iterate_idle_states(dn, st);
3497 if (ret <= 0) {
3498 kfree(st);
3499 return ret < 0 ? ret : -EINVAL;
3500 }
3501
3502 *states = st;
3503 *n = ret;
3504
3505 return 0;
3506}
3507EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
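
/*
 * Example (sketch): filling a genpd's idle states from a provider node that
 * carries a "domain-idle-states" phandle list; "foo_pd" is hypothetical.
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *
 * The states must be assigned before pm_genpd_init(), which otherwise falls
 * back to a single default "off" state.
 */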
3508
3509/**
3510 * of_genpd_sync_state() - A common sync_state function for genpd providers
3511 * @np: The device node the genpd provider is associated with.
3512 *
3513 * The @np that corresponds to a genpd provider may provide one or multiple
 * genpds. This function uses @np to find the genpds that belong to the
 * provider. For each such genpd, a power-off is attempted.
3516 */
3517void of_genpd_sync_state(struct device_node *np)
3518{
3519 struct generic_pm_domain *genpd;
3520
3521 if (!np)
3522 return;
3523
3524 mutex_lock(&gpd_list_lock);
3525 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3526 if (genpd->provider == of_fwnode_handle(np)) {
3527 genpd_lock(genpd);
3528 genpd->stay_on = false;
3529 genpd_power_off(genpd, false, 0);
3530 genpd_unlock(genpd);
3531 }
3532 }
3533 mutex_unlock(&gpd_list_lock);
3534}
3535EXPORT_SYMBOL_GPL(of_genpd_sync_state);
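
/*
 * Example (sketch): a provider driver wiring up its ->sync_state() callback
 * so that unused domains stay on until all consumers have probed. The "foo"
 * names are hypothetical.
 *
 *	static void foo_pd_sync_state(struct device *dev)
 *	{
 *		of_genpd_sync_state(dev->of_node);
 *	}
 *
 *	static struct platform_driver foo_pd_driver = {
 *		.driver = {
 *			.name = "foo-power-controller",
 *			.sync_state = foo_pd_sync_state,
 *		},
 *	};
 */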
3536
3537static int genpd_provider_probe(struct device *dev)
3538{
3539 return 0;
3540}
3541
3542static void genpd_provider_sync_state(struct device *dev)
3543{
3544 struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
3545
3546 switch (genpd->sync_state) {
3547 case GENPD_SYNC_STATE_OFF:
3548 break;
3549
3550 case GENPD_SYNC_STATE_ONECELL:
3551 of_genpd_sync_state(dev->of_node);
3552 break;
3553
3554 case GENPD_SYNC_STATE_SIMPLE:
3555 genpd_lock(genpd);
3556 genpd->stay_on = false;
3557 genpd_power_off(genpd, false, 0);
3558 genpd_unlock(genpd);
3559 break;
3560
3561 default:
3562 break;
3563 }
3564}
3565
3566static struct device_driver genpd_provider_drv = {
3567 .name = "genpd_provider",
3568 .bus = &genpd_provider_bus_type,
3569 .probe = genpd_provider_probe,
3570 .sync_state = genpd_provider_sync_state,
3571 .suppress_bind_attrs = true,
3572};
3573
3574static int __init genpd_bus_init(void)
3575{
3576 int ret;
3577
3578 ret = device_register(&genpd_provider_bus);
3579 if (ret) {
3580 put_device(&genpd_provider_bus);
3581 return ret;
3582 }
3583
3584 ret = bus_register(&genpd_provider_bus_type);
3585 if (ret)
3586 goto err_dev;
3587
3588 ret = bus_register(&genpd_bus_type);
3589 if (ret)
3590 goto err_prov_bus;
3591
3592 ret = driver_register(&genpd_provider_drv);
3593 if (ret)
3594 goto err_bus;
3595
3596 genpd_bus_registered = true;
3597 return 0;
3598
3599err_bus:
3600 bus_unregister(&genpd_bus_type);
3601err_prov_bus:
3602 bus_unregister(&genpd_provider_bus_type);
3603err_dev:
3604 device_unregister(&genpd_provider_bus);
3605 return ret;
3606}
3607core_initcall(genpd_bus_init);
3608
3609#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3610
3611
3612/*** debugfs support ***/
3613
3614#ifdef CONFIG_DEBUG_FS
3615/*
3616 * TODO: This function is a slightly modified version of rtpm_status_show
3617 * from sysfs.c, so generalize it.
3618 */
3619static void rtpm_status_str(struct seq_file *s, struct device *dev)
3620{
3621 static const char * const status_lookup[] = {
3622 [RPM_ACTIVE] = "active",
3623 [RPM_RESUMING] = "resuming",
3624 [RPM_SUSPENDED] = "suspended",
3625 [RPM_SUSPENDING] = "suspending"
3626 };
3627 const char *p = "";
3628
3629 if (dev->power.runtime_error)
3630 p = "error";
3631 else if (dev->power.disable_depth)
3632 p = "unsupported";
3633 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3634 p = status_lookup[dev->power.runtime_status];
3635 else
3636 WARN_ON(1);
3637
3638 seq_printf(s, "%-26s ", p);
3639}
3640
3641static void perf_status_str(struct seq_file *s, struct device *dev)
3642{
3643 struct generic_pm_domain_data *gpd_data;
3644
3645 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3646
3647 seq_printf(s, "%-10u ", gpd_data->performance_state);
3648}
3649
3650static void mode_status_str(struct seq_file *s, struct device *dev)
3651{
3652 struct generic_pm_domain_data *gpd_data;
3653
3654 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3655
3656 seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3657}
3658
3659static int genpd_summary_one(struct seq_file *s,
3660 struct generic_pm_domain *genpd)
3661{
3662 static const char * const status_lookup[] = {
3663 [GENPD_STATE_ON] = "on",
3664 [GENPD_STATE_OFF] = "off"
3665 };
3666 struct pm_domain_data *pm_data;
3667 struct gpd_link *link;
3668 char state[16];
3669 int ret;
3670
3671 ret = genpd_lock_interruptible(genpd);
3672 if (ret)
3673 return -ERESTARTSYS;
3674
3675 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3676 goto exit;
3677 if (!genpd_status_on(genpd))
3678 snprintf(state, sizeof(state), "%s-%u",
3679 status_lookup[genpd->status], genpd->state_idx);
3680 else
3681 snprintf(state, sizeof(state), "%s",
3682 status_lookup[genpd->status]);
3683 seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);
3684
3685 /*
3686 * Modifications on the list require holding locks on both
3687 * parent and child, so we are safe.
3688 * Also the device name is immutable.
3689 */
3690 list_for_each_entry(link, &genpd->parent_links, parent_node) {
3691 if (list_is_first(&link->parent_node, &genpd->parent_links))
3692 seq_printf(s, "\n%48s", " ");
3693 seq_printf(s, "%s", link->child->name);
3694 if (!list_is_last(&link->parent_node, &genpd->parent_links))
3695 seq_puts(s, ", ");
3696 }
3697
3698 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3699 seq_printf(s, "\n %-30s ", dev_name(pm_data->dev));
3700 rtpm_status_str(s, pm_data->dev);
3701 perf_status_str(s, pm_data->dev);
3702 mode_status_str(s, pm_data->dev);
3703 }
3704
3705 seq_puts(s, "\n");
3706exit:
3707 genpd_unlock(genpd);
3708
3709 return 0;
3710}
3711
3712static int summary_show(struct seq_file *s, void *data)
3713{
3714 struct generic_pm_domain *genpd;
3715 int ret = 0;
3716
	seq_puts(s, "domain                         status           children      performance\n");
	seq_puts(s, " /device                        runtime status             managed by\n");
3719 seq_puts(s, "------------------------------------------------------------------------------\n");
3720
3721 ret = mutex_lock_interruptible(&gpd_list_lock);
3722 if (ret)
3723 return -ERESTARTSYS;
3724
3725 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3726 ret = genpd_summary_one(s, genpd);
3727 if (ret)
3728 break;
3729 }
3730 mutex_unlock(&gpd_list_lock);
3731
3732 return ret;
3733}
3734
3735static int status_show(struct seq_file *s, void *data)
3736{
3737 static const char * const status_lookup[] = {
3738 [GENPD_STATE_ON] = "on",
3739 [GENPD_STATE_OFF] = "off"
3740 };
3741
3742 struct generic_pm_domain *genpd = s->private;
3743 int ret = 0;
3744
3745 ret = genpd_lock_interruptible(genpd);
3746 if (ret)
3747 return -ERESTARTSYS;
3748
3749 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3750 goto exit;
3751
3752 if (genpd->status == GENPD_STATE_OFF)
3753 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3754 genpd->state_idx);
3755 else
3756 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3757exit:
3758 genpd_unlock(genpd);
3759 return ret;
3760}
3761
3762static int sub_domains_show(struct seq_file *s, void *data)
3763{
3764 struct generic_pm_domain *genpd = s->private;
3765 struct gpd_link *link;
3766 int ret = 0;
3767
3768 ret = genpd_lock_interruptible(genpd);
3769 if (ret)
3770 return -ERESTARTSYS;
3771
3772 list_for_each_entry(link, &genpd->parent_links, parent_node)
3773 seq_printf(s, "%s\n", link->child->name);
3774
3775 genpd_unlock(genpd);
3776 return ret;
3777}
3778
3779static int idle_states_show(struct seq_file *s, void *data)
3780{
3781 struct generic_pm_domain *genpd = s->private;
3782 u64 now, delta, idle_time = 0;
3783 unsigned int i;
3784 int ret = 0;
3785
3786 ret = genpd_lock_interruptible(genpd);
3787 if (ret)
3788 return -ERESTARTSYS;
3789
	seq_puts(s, "State  Time(ms)       Usage      Rejected   Above      Below      S2idle\n");
3791
3792 for (i = 0; i < genpd->state_count; i++) {
3793 struct genpd_power_state *state = &genpd->states[i];
3794 char state_name[7];
3795
		idle_time = state->idle_time;
3797
3798 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3799 now = ktime_get_mono_fast_ns();
3800 if (now > genpd->accounting_time) {
3801 delta = now - genpd->accounting_time;
3802 idle_time += delta;
3803 }
3804 }
3805
3806 snprintf(state_name, ARRAY_SIZE(state_name), "S%-5d", i);
3807 do_div(idle_time, NSEC_PER_MSEC);
3808 seq_printf(s, "%-6s %-14llu %-10llu %-10llu %-10llu %-10llu %llu\n",
3809 state_name, idle_time, state->usage, state->rejected,
3810 state->above, state->below, state->usage_s2idle);
3811 }
3812
3813 genpd_unlock(genpd);
3814 return ret;
3815}
3816
3817static int idle_states_desc_show(struct seq_file *s, void *data)
3818{
3819 struct generic_pm_domain *genpd = s->private;
3820 unsigned int i;
3821 int ret = 0;
3822
3823 ret = genpd_lock_interruptible(genpd);
3824 if (ret)
3825 return -ERESTARTSYS;
3826
	seq_puts(s, "State  Latency(us)  Residency(us)  Name\n");
3828
3829 for (i = 0; i < genpd->state_count; i++) {
3830 struct genpd_power_state *state = &genpd->states[i];
3831 u64 latency, residency;
3832 char state_name[7];
3833
3834 latency = state->power_off_latency_ns +
3835 state->power_on_latency_ns;
3836 do_div(latency, NSEC_PER_USEC);
3837
3838 residency = state->residency_ns;
3839 do_div(residency, NSEC_PER_USEC);
3840
3841 snprintf(state_name, ARRAY_SIZE(state_name), "S%-5d", i);
3842 seq_printf(s, "%-6s %-12llu %-14llu %s\n",
3843 state_name, latency, residency,
3844 state->name ?: "N/A");
3845 }
3846
3847 genpd_unlock(genpd);
3848 return ret;
3849}
3850
3851static int active_time_show(struct seq_file *s, void *data)
3852{
3853 struct generic_pm_domain *genpd = s->private;
3854 u64 now, on_time, delta = 0;
3855 int ret = 0;
3856
3857 ret = genpd_lock_interruptible(genpd);
3858 if (ret)
3859 return -ERESTARTSYS;
3860
3861 if (genpd->status == GENPD_STATE_ON) {
3862 now = ktime_get_mono_fast_ns();
3863 if (now > genpd->accounting_time)
3864 delta = now - genpd->accounting_time;
3865 }
3866
3867 on_time = genpd->on_time + delta;
3868 do_div(on_time, NSEC_PER_MSEC);
3869 seq_printf(s, "%llu ms\n", on_time);
3870
3871 genpd_unlock(genpd);
3872 return ret;
3873}
3874
3875static int total_idle_time_show(struct seq_file *s, void *data)
3876{
3877 struct generic_pm_domain *genpd = s->private;
3878 u64 now, delta, total = 0;
3879 unsigned int i;
3880 int ret = 0;
3881
3882 ret = genpd_lock_interruptible(genpd);
3883 if (ret)
3884 return -ERESTARTSYS;
3885
3886 for (i = 0; i < genpd->state_count; i++) {
3887 total += genpd->states[i].idle_time;
3888
3889 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3890 now = ktime_get_mono_fast_ns();
3891 if (now > genpd->accounting_time) {
3892 delta = now - genpd->accounting_time;
3893 total += delta;
3894 }
3895 }
3896 }
3897
3898 do_div(total, NSEC_PER_MSEC);
3899 seq_printf(s, "%llu ms\n", total);
3900
3901 genpd_unlock(genpd);
3902 return ret;
3903}
3904
3905
3906static int devices_show(struct seq_file *s, void *data)
3907{
3908 struct generic_pm_domain *genpd = s->private;
3909 struct pm_domain_data *pm_data;
3910 int ret = 0;
3911
3912 ret = genpd_lock_interruptible(genpd);
3913 if (ret)
3914 return -ERESTARTSYS;
3915
3916 list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3917 seq_printf(s, "%s\n", dev_name(pm_data->dev));
3918
3919 genpd_unlock(genpd);
3920 return ret;
3921}
3922
3923static int perf_state_show(struct seq_file *s, void *data)
3924{
3925 struct generic_pm_domain *genpd = s->private;
3926
3927 if (genpd_lock_interruptible(genpd))
3928 return -ERESTARTSYS;
3929
3930 seq_printf(s, "%u\n", genpd->performance_state);
3931
3932 genpd_unlock(genpd);
3933 return 0;
3934}
3935
3936DEFINE_SHOW_ATTRIBUTE(summary);
3937DEFINE_SHOW_ATTRIBUTE(status);
3938DEFINE_SHOW_ATTRIBUTE(sub_domains);
3939DEFINE_SHOW_ATTRIBUTE(idle_states);
3940DEFINE_SHOW_ATTRIBUTE(idle_states_desc);
3941DEFINE_SHOW_ATTRIBUTE(active_time);
3942DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3943DEFINE_SHOW_ATTRIBUTE(devices);
3944DEFINE_SHOW_ATTRIBUTE(perf_state);
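
/*
 * The attributes above back the files under /sys/kernel/debug/pm_genpd/: one
 * directory per domain plus a global summary. For example (assuming debugfs
 * is mounted at the usual location):
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	# cat /sys/kernel/debug/pm_genpd/<domain>/current_state
 */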
3945
3946static void genpd_debug_add(struct generic_pm_domain *genpd)
3947{
3948 struct dentry *d;
3949
3950 if (!genpd_debugfs_dir)
3951 return;
3952
3953 d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
3954
3955 debugfs_create_file("current_state", 0444,
3956 d, genpd, &status_fops);
3957 debugfs_create_file("sub_domains", 0444,
3958 d, genpd, &sub_domains_fops);
3959 debugfs_create_file("idle_states", 0444,
3960 d, genpd, &idle_states_fops);
3961 debugfs_create_file("idle_states_desc", 0444,
3962 d, genpd, &idle_states_desc_fops);
3963 debugfs_create_file("active_time", 0444,
3964 d, genpd, &active_time_fops);
3965 debugfs_create_file("total_idle_time", 0444,
3966 d, genpd, &total_idle_time_fops);
3967 debugfs_create_file("devices", 0444,
3968 d, genpd, &devices_fops);
3969 if (genpd->set_performance_state)
3970 debugfs_create_file("perf_state", 0444,
3971 d, genpd, &perf_state_fops);
3972}
3973
3974static int __init genpd_debug_init(void)
3975{
3976 struct generic_pm_domain *genpd;
3977
3978 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3979
3980 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3981 NULL, &summary_fops);
3982
3983 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3984 genpd_debug_add(genpd);
3985
3986 return 0;
3987}
3988late_initcall(genpd_debug_init);
3989
3990static void __exit genpd_debug_exit(void)
3991{
3992 debugfs_remove_recursive(genpd_debugfs_dir);
3993}
3994__exitcall(genpd_debug_exit);
3995#endif /* CONFIG_DEBUG_FS */