Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'pmdomain-v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm

Pull pmdomain updates from Ulf Hansson:
"pmdomain core:
- Add support for s2idle for CPU PM domains on PREEMPT_RT
- Add device managed version of dev_pm_domain_attach|detach_list()
- Improve layout of the debugfs summary table

pmdomain providers:
- amlogic: Remove obsolete vpu domain driver
- bcm: raspberrypi: Add support for devices used as wakeup-sources
- imx: Fixup clock handling for imx93 at driver remove
- rockchip: Add gating support for RK3576
- rockchip: Add support for RK3576 SoC
- Some OF parsing simplifications
- Some simplifications by using dev_err_probe() and guard()

pmdomain consumers:
- qcom/media/venus: Convert to the device managed APIs for PM domains

cpuidle-psci:
- Add support for s2idle/s2ram for the hierarchical topology on
PREEMPT_RT
- Some OF parsing simplifications"

* tag 'pmdomain-v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm: (39 commits)
pmdomain: core: Reduce debug summary table width
pmdomain: core: Move mode_status_str()
pmdomain: core: Fix "managed by" alignment in debug summary
pmdomain: core: Harden inter-column space in debug summary
pmdomain: rockchip: Add gating masks for rk3576
pmdomain: rockchip: Add gating support
pmdomain: rockchip: Simplify dropping OF node reference
pmdomain: mediatek: make use of dev_err_cast_probe()
pmdomain: imx93-pd: drop the context variable "init_off"
pmdomain: imx93-pd: don't unprepare clocks on driver remove
pmdomain: imx93-pd: replace dev_err() with dev_err_probe()
pmdomain: qcom: rpmpd: Simplify locking with guard()
pmdomain: qcom: rpmhpd: Simplify locking with guard()
pmdomain: qcom: cpr: Simplify locking with guard()
pmdomain: qcom: cpr: Simplify with dev_err_probe()
pmdomain: imx: gpcv2: Simplify with scoped for each OF child loop
pmdomain: imx: gpc: Simplify with scoped for each OF child loop
pmdomain: rockchip: Simplify locking with guard()
pmdomain: rockchip: Simplify with scoped for each OF child loop
pmdomain: qcom-cpr: Use scope based of_node_put() to simplify code.
...

+378 -608
+1
Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
··· 41 41 - rockchip,rk3368-power-controller 42 42 - rockchip,rk3399-power-controller 43 43 - rockchip,rk3568-power-controller 44 + - rockchip,rk3576-power-controller 44 45 - rockchip,rk3588-power-controller 45 46 - rockchip,rv1126-power-controller 46 47
+45
drivers/base/power/common.c
··· 277 277 EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list); 278 278 279 279 /** 280 + * devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list. 281 + * @_list: The list of PM domains to detach. 282 + * 283 + * This function reverse the actions from devm_pm_domain_attach_list(). 284 + * it will be invoked during the remove phase from drivers implicitly if driver 285 + * uses devm_pm_domain_attach_list() to attach the PM domains. 286 + */ 287 + static void devm_pm_domain_detach_list(void *_list) 288 + { 289 + struct dev_pm_domain_list *list = _list; 290 + 291 + dev_pm_domain_detach_list(list); 292 + } 293 + 294 + /** 295 + * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list 296 + * @dev: The device used to lookup the PM domains for. 297 + * @data: The data used for attaching to the PM domains. 298 + * @list: An out-parameter with an allocated list of attached PM domains. 299 + * 300 + * NOTE: this will also handle calling devm_pm_domain_detach_list() for 301 + * you during remove phase. 302 + * 303 + * Returns the number of attached PM domains or a negative error code in case of 304 + * a failure. 305 + */ 306 + int devm_pm_domain_attach_list(struct device *dev, 307 + const struct dev_pm_domain_attach_data *data, 308 + struct dev_pm_domain_list **list) 309 + { 310 + int ret, num_pds; 311 + 312 + num_pds = dev_pm_domain_attach_list(dev, data, list); 313 + if (num_pds <= 0) 314 + return num_pds; 315 + 316 + ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list); 317 + if (ret) 318 + return ret; 319 + 320 + return num_pds; 321 + } 322 + EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list); 323 + 324 + /** 280 325 * dev_pm_domain_detach - Detach a device from its PM domain. 281 326 * @dev: Device to detach. 282 327 * @power_off: Used to indicate whether we should power off the device.
+9 -8
drivers/cpuidle/cpuidle-psci-domain.c
··· 67 67 68 68 /* 69 69 * Allow power off when OSI has been successfully enabled. 70 - * PREEMPT_RT is not yet ready to enter domain idle states. 70 + * On a PREEMPT_RT based configuration the domain idle states are 71 + * supported, but only during system-wide suspend. 71 72 */ 72 - if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT)) 73 + if (use_osi) { 73 74 pd->power_off = psci_pd_power_off; 74 - else 75 + if (IS_ENABLED(CONFIG_PREEMPT_RT)) 76 + pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; 77 + } else { 75 78 pd->flags |= GENPD_FLAG_ALWAYS_ON; 79 + } 76 80 77 81 /* Use governor for CPU PM domains if it has some states to manage. */ 78 82 pd_gov = pd->states ? &pm_domain_cpu_gov : NULL; ··· 142 138 static int psci_cpuidle_domain_probe(struct platform_device *pdev) 143 139 { 144 140 struct device_node *np = pdev->dev.of_node; 145 - struct device_node *node; 146 141 bool use_osi = psci_has_osi_support(); 147 142 int ret = 0, pd_count = 0; 148 143 ··· 152 149 * Parse child nodes for the "#power-domain-cells" property and 153 150 * initialize a genpd/genpd-of-provider pair when it's found. 154 151 */ 155 - for_each_child_of_node(np, node) { 152 + for_each_child_of_node_scoped(np, node) { 156 153 if (!of_property_present(node, "#power-domain-cells")) 157 154 continue; 158 155 159 156 ret = psci_pd_init(node, use_osi); 160 - if (ret) { 161 - of_node_put(node); 157 + if (ret) 162 158 goto exit; 163 - } 164 159 165 160 pd_count++; 166 161 }
+17 -9
drivers/cpuidle/cpuidle-psci.c
··· 37 37 38 38 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); 39 39 static DEFINE_PER_CPU(u32, domain_state); 40 + static bool psci_cpuidle_use_syscore; 40 41 static bool psci_cpuidle_use_cpuhp; 41 42 42 43 void psci_set_domain_state(u32 state) ··· 167 166 .resume = psci_idle_syscore_resume, 168 167 }; 169 168 169 + static void psci_idle_init_syscore(void) 170 + { 171 + if (psci_cpuidle_use_syscore) 172 + register_syscore_ops(&psci_idle_syscore_ops); 173 + } 174 + 170 175 static void psci_idle_init_cpuhp(void) 171 176 { 172 177 int err; 173 178 174 179 if (!psci_cpuidle_use_cpuhp) 175 180 return; 176 - 177 - register_syscore_ops(&psci_idle_syscore_ops); 178 181 179 182 err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, 180 183 "cpuidle/psci:online", ··· 227 222 if (!psci_has_osi_support()) 228 223 return 0; 229 224 230 - if (IS_ENABLED(CONFIG_PREEMPT_RT)) 231 - return 0; 232 - 233 225 data->dev = dt_idle_attach_cpu(cpu, "psci"); 234 226 if (IS_ERR_OR_NULL(data->dev)) 235 227 return PTR_ERR_OR_ZERO(data->dev); 236 228 229 + psci_cpuidle_use_syscore = true; 230 + 237 231 /* 238 232 * Using the deepest state for the CPU to trigger a potential selection 239 233 * of a shared state for the domain, assumes the domain states are all 240 - * deeper states. 234 + * deeper states. On PREEMPT_RT the hierarchical topology is limited to 235 + * s2ram and s2idle. 
241 236 */ 242 - drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE; 243 - drv->states[state_count - 1].enter = psci_enter_domain_idle_state; 244 237 drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; 245 - psci_cpuidle_use_cpuhp = true; 238 + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 239 + drv->states[state_count - 1].enter = psci_enter_domain_idle_state; 240 + psci_cpuidle_use_cpuhp = true; 241 + } 246 242 247 243 return 0; 248 244 } ··· 319 313 struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); 320 314 321 315 dt_idle_detach_cpu(data->dev); 316 + psci_cpuidle_use_syscore = false; 322 317 psci_cpuidle_use_cpuhp = false; 323 318 } 324 319 ··· 416 409 goto out_fail; 417 410 } 418 411 412 + psci_idle_init_syscore(); 419 413 psci_idle_init_cpuhp(); 420 414 return 0; 421 415
+4 -10
drivers/cpuidle/dt_idle_genpd.c
··· 130 130 131 131 int dt_idle_pd_init_topology(struct device_node *np) 132 132 { 133 - struct device_node *node; 134 133 struct of_phandle_args child, parent; 135 134 int ret; 136 135 137 - for_each_child_of_node(np, node) { 136 + for_each_child_of_node_scoped(np, node) { 138 137 if (of_parse_phandle_with_args(node, "power-domains", 139 138 "#power-domain-cells", 0, &parent)) 140 139 continue; ··· 142 143 child.args_count = 0; 143 144 ret = of_genpd_add_subdomain(&parent, &child); 144 145 of_node_put(parent.np); 145 - if (ret) { 146 - of_node_put(node); 146 + if (ret) 147 147 return ret; 148 - } 149 148 } 150 149 151 150 return 0; ··· 151 154 152 155 int dt_idle_pd_remove_topology(struct device_node *np) 153 156 { 154 - struct device_node *node; 155 157 struct of_phandle_args child, parent; 156 158 int ret; 157 159 158 - for_each_child_of_node(np, node) { 160 + for_each_child_of_node_scoped(np, node) { 159 161 if (of_parse_phandle_with_args(node, "power-domains", 160 162 "#power-domain-cells", 0, &parent)) 161 163 continue; ··· 163 167 child.args_count = 0; 164 168 ret = of_genpd_remove_subdomain(&parent, &child); 165 169 of_node_put(parent.np); 166 - if (ret) { 167 - of_node_put(node); 170 + if (ret) 168 171 return ret; 169 - } 170 172 } 171 173 172 174 return 0;
+1 -4
drivers/media/platform/qcom/venus/pm_helpers.c
··· 876 876 if (!res->vcodec_pmdomains_num) 877 877 goto skip_pmdomains; 878 878 879 - ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains); 879 + ret = devm_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains); 880 880 if (ret < 0) 881 881 return ret; 882 882 ··· 902 902 return 0; 903 903 904 904 opp_attach_err: 905 - dev_pm_domain_detach_list(core->pmdomains); 906 905 return ret; 907 906 } 908 907 909 908 static void vcodec_domains_put(struct venus_core *core) 910 909 { 911 - dev_pm_domain_detach_list(core->pmdomains); 912 - 913 910 if (!core->has_opp_table) 914 911 return; 915 912
-11
drivers/pmdomain/amlogic/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 menu "Amlogic PM Domains" 3 3 4 - config MESON_GX_PM_DOMAINS 5 - tristate "Amlogic Meson GX Power Domains driver" 6 - depends on ARCH_MESON || COMPILE_TEST 7 - depends on PM && OF 8 - default ARCH_MESON 9 - select PM_GENERIC_DOMAINS 10 - select PM_GENERIC_DOMAINS_OF 11 - help 12 - Say yes to expose Amlogic Meson GX Power Domains as 13 - Generic Power Domains. 14 - 15 4 config MESON_EE_PM_DOMAINS 16 5 tristate "Amlogic Meson Everything-Else Power Domains driver" 17 6 depends on ARCH_MESON || COMPILE_TEST
-1
drivers/pmdomain/amlogic/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o 3 2 obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o 4 3 obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
-380
drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
··· 1 - /* 2 - * Copyright (c) 2017 BayLibre, SAS 3 - * Author: Neil Armstrong <narmstrong@baylibre.com> 4 - * 5 - * SPDX-License-Identifier: GPL-2.0+ 6 - */ 7 - 8 - #include <linux/platform_device.h> 9 - #include <linux/pm_domain.h> 10 - #include <linux/bitfield.h> 11 - #include <linux/regmap.h> 12 - #include <linux/mfd/syscon.h> 13 - #include <linux/of.h> 14 - #include <linux/reset.h> 15 - #include <linux/clk.h> 16 - #include <linux/module.h> 17 - 18 - /* AO Offsets */ 19 - 20 - #define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2) 21 - 22 - #define GEN_PWR_VPU_HDMI BIT(8) 23 - #define GEN_PWR_VPU_HDMI_ISO BIT(9) 24 - 25 - /* HHI Offsets */ 26 - 27 - #define HHI_MEM_PD_REG0 (0x40 << 2) 28 - #define HHI_VPU_MEM_PD_REG0 (0x41 << 2) 29 - #define HHI_VPU_MEM_PD_REG1 (0x42 << 2) 30 - #define HHI_VPU_MEM_PD_REG2 (0x4d << 2) 31 - 32 - struct meson_gx_pwrc_vpu { 33 - struct generic_pm_domain genpd; 34 - struct regmap *regmap_ao; 35 - struct regmap *regmap_hhi; 36 - struct reset_control *rstc; 37 - struct clk *vpu_clk; 38 - struct clk *vapb_clk; 39 - }; 40 - 41 - static inline 42 - struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d) 43 - { 44 - return container_of(d, struct meson_gx_pwrc_vpu, genpd); 45 - } 46 - 47 - static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd) 48 - { 49 - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 50 - int i; 51 - 52 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 53 - GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); 54 - udelay(20); 55 - 56 - /* Power Down Memories */ 57 - for (i = 0; i < 32; i += 2) { 58 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 59 - 0x3 << i, 0x3 << i); 60 - udelay(5); 61 - } 62 - for (i = 0; i < 32; i += 2) { 63 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 64 - 0x3 << i, 0x3 << i); 65 - udelay(5); 66 - } 67 - for (i = 8; i < 16; i++) { 68 - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 69 - BIT(i), BIT(i)); 70 - udelay(5); 71 - } 72 - udelay(20); 73 - 74 
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 75 - GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); 76 - 77 - msleep(20); 78 - 79 - clk_disable_unprepare(pd->vpu_clk); 80 - clk_disable_unprepare(pd->vapb_clk); 81 - 82 - return 0; 83 - } 84 - 85 - static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd) 86 - { 87 - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 88 - int i; 89 - 90 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 91 - GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); 92 - udelay(20); 93 - 94 - /* Power Down Memories */ 95 - for (i = 0; i < 32; i += 2) { 96 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 97 - 0x3 << i, 0x3 << i); 98 - udelay(5); 99 - } 100 - for (i = 0; i < 32; i += 2) { 101 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 102 - 0x3 << i, 0x3 << i); 103 - udelay(5); 104 - } 105 - for (i = 0; i < 32; i += 2) { 106 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, 107 - 0x3 << i, 0x3 << i); 108 - udelay(5); 109 - } 110 - for (i = 8; i < 16; i++) { 111 - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 112 - BIT(i), BIT(i)); 113 - udelay(5); 114 - } 115 - udelay(20); 116 - 117 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 118 - GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); 119 - 120 - msleep(20); 121 - 122 - clk_disable_unprepare(pd->vpu_clk); 123 - clk_disable_unprepare(pd->vapb_clk); 124 - 125 - return 0; 126 - } 127 - 128 - static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd) 129 - { 130 - int ret; 131 - 132 - ret = clk_prepare_enable(pd->vpu_clk); 133 - if (ret) 134 - return ret; 135 - 136 - ret = clk_prepare_enable(pd->vapb_clk); 137 - if (ret) 138 - clk_disable_unprepare(pd->vpu_clk); 139 - 140 - return ret; 141 - } 142 - 143 - static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd) 144 - { 145 - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 146 - int ret; 147 - int i; 148 - 149 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 
150 - GEN_PWR_VPU_HDMI, 0); 151 - udelay(20); 152 - 153 - /* Power Up Memories */ 154 - for (i = 0; i < 32; i += 2) { 155 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 156 - 0x3 << i, 0); 157 - udelay(5); 158 - } 159 - 160 - for (i = 0; i < 32; i += 2) { 161 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 162 - 0x3 << i, 0); 163 - udelay(5); 164 - } 165 - 166 - for (i = 8; i < 16; i++) { 167 - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 168 - BIT(i), 0); 169 - udelay(5); 170 - } 171 - udelay(20); 172 - 173 - ret = reset_control_assert(pd->rstc); 174 - if (ret) 175 - return ret; 176 - 177 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 178 - GEN_PWR_VPU_HDMI_ISO, 0); 179 - 180 - ret = reset_control_deassert(pd->rstc); 181 - if (ret) 182 - return ret; 183 - 184 - ret = meson_gx_pwrc_vpu_setup_clk(pd); 185 - if (ret) 186 - return ret; 187 - 188 - return 0; 189 - } 190 - 191 - static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd) 192 - { 193 - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 194 - int ret; 195 - int i; 196 - 197 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 198 - GEN_PWR_VPU_HDMI, 0); 199 - udelay(20); 200 - 201 - /* Power Up Memories */ 202 - for (i = 0; i < 32; i += 2) { 203 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 204 - 0x3 << i, 0); 205 - udelay(5); 206 - } 207 - 208 - for (i = 0; i < 32; i += 2) { 209 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 210 - 0x3 << i, 0); 211 - udelay(5); 212 - } 213 - 214 - for (i = 0; i < 32; i += 2) { 215 - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, 216 - 0x3 << i, 0); 217 - udelay(5); 218 - } 219 - 220 - for (i = 8; i < 16; i++) { 221 - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 222 - BIT(i), 0); 223 - udelay(5); 224 - } 225 - udelay(20); 226 - 227 - ret = reset_control_assert(pd->rstc); 228 - if (ret) 229 - return ret; 230 - 231 - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 232 
- GEN_PWR_VPU_HDMI_ISO, 0); 233 - 234 - ret = reset_control_deassert(pd->rstc); 235 - if (ret) 236 - return ret; 237 - 238 - ret = meson_gx_pwrc_vpu_setup_clk(pd); 239 - if (ret) 240 - return ret; 241 - 242 - return 0; 243 - } 244 - 245 - static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd) 246 - { 247 - u32 reg; 248 - 249 - regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg); 250 - 251 - return (reg & GEN_PWR_VPU_HDMI); 252 - } 253 - 254 - static struct meson_gx_pwrc_vpu vpu_hdmi_pd = { 255 - .genpd = { 256 - .name = "vpu_hdmi", 257 - .power_off = meson_gx_pwrc_vpu_power_off, 258 - .power_on = meson_gx_pwrc_vpu_power_on, 259 - }, 260 - }; 261 - 262 - static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = { 263 - .genpd = { 264 - .name = "vpu_hdmi", 265 - .power_off = meson_g12a_pwrc_vpu_power_off, 266 - .power_on = meson_g12a_pwrc_vpu_power_on, 267 - }, 268 - }; 269 - 270 - static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) 271 - { 272 - const struct meson_gx_pwrc_vpu *vpu_pd_match; 273 - struct regmap *regmap_ao, *regmap_hhi; 274 - struct meson_gx_pwrc_vpu *vpu_pd; 275 - struct device_node *parent_np; 276 - struct reset_control *rstc; 277 - struct clk *vpu_clk; 278 - struct clk *vapb_clk; 279 - bool powered_off; 280 - int ret; 281 - 282 - vpu_pd_match = of_device_get_match_data(&pdev->dev); 283 - if (!vpu_pd_match) { 284 - dev_err(&pdev->dev, "failed to get match data\n"); 285 - return -ENODEV; 286 - } 287 - 288 - vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL); 289 - if (!vpu_pd) 290 - return -ENOMEM; 291 - 292 - memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd)); 293 - 294 - parent_np = of_get_parent(pdev->dev.of_node); 295 - regmap_ao = syscon_node_to_regmap(parent_np); 296 - of_node_put(parent_np); 297 - if (IS_ERR(regmap_ao)) { 298 - dev_err(&pdev->dev, "failed to get regmap\n"); 299 - return PTR_ERR(regmap_ao); 300 - } 301 - 302 - regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 303 - 
"amlogic,hhi-sysctrl"); 304 - if (IS_ERR(regmap_hhi)) { 305 - dev_err(&pdev->dev, "failed to get HHI regmap\n"); 306 - return PTR_ERR(regmap_hhi); 307 - } 308 - 309 - rstc = devm_reset_control_array_get_exclusive(&pdev->dev); 310 - if (IS_ERR(rstc)) 311 - return dev_err_probe(&pdev->dev, PTR_ERR(rstc), 312 - "failed to get reset lines\n"); 313 - 314 - vpu_clk = devm_clk_get(&pdev->dev, "vpu"); 315 - if (IS_ERR(vpu_clk)) { 316 - dev_err(&pdev->dev, "vpu clock request failed\n"); 317 - return PTR_ERR(vpu_clk); 318 - } 319 - 320 - vapb_clk = devm_clk_get(&pdev->dev, "vapb"); 321 - if (IS_ERR(vapb_clk)) { 322 - dev_err(&pdev->dev, "vapb clock request failed\n"); 323 - return PTR_ERR(vapb_clk); 324 - } 325 - 326 - vpu_pd->regmap_ao = regmap_ao; 327 - vpu_pd->regmap_hhi = regmap_hhi; 328 - vpu_pd->rstc = rstc; 329 - vpu_pd->vpu_clk = vpu_clk; 330 - vpu_pd->vapb_clk = vapb_clk; 331 - 332 - platform_set_drvdata(pdev, vpu_pd); 333 - 334 - powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); 335 - 336 - /* If already powered, sync the clock states */ 337 - if (!powered_off) { 338 - ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd); 339 - if (ret) 340 - return ret; 341 - } 342 - 343 - vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON; 344 - pm_genpd_init(&vpu_pd->genpd, NULL, powered_off); 345 - 346 - return of_genpd_add_provider_simple(pdev->dev.of_node, 347 - &vpu_pd->genpd); 348 - } 349 - 350 - static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev) 351 - { 352 - struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev); 353 - bool powered_off; 354 - 355 - powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); 356 - if (!powered_off) 357 - vpu_pd->genpd.power_off(&vpu_pd->genpd); 358 - } 359 - 360 - static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = { 361 - { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd }, 362 - { 363 - .compatible = "amlogic,meson-g12a-pwrc-vpu", 364 - .data = &vpu_hdmi_pd_g12a 365 - }, 366 - { /* sentinel */ } 367 - }; 368 
- MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table); 369 - 370 - static struct platform_driver meson_gx_pwrc_vpu_driver = { 371 - .probe = meson_gx_pwrc_vpu_probe, 372 - .shutdown = meson_gx_pwrc_vpu_shutdown, 373 - .driver = { 374 - .name = "meson_gx_pwrc_vpu", 375 - .of_match_table = meson_gx_pwrc_vpu_match_table, 376 - }, 377 - }; 378 - module_platform_driver(meson_gx_pwrc_vpu_driver); 379 - MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver"); 380 - MODULE_LICENSE("GPL v2");
+1 -1
drivers/pmdomain/apple/pmgr-pwrstate.c
··· 177 177 return !!(reg & APPLE_PMGR_RESET); 178 178 } 179 179 180 - const struct reset_control_ops apple_pmgr_reset_ops = { 180 + static const struct reset_control_ops apple_pmgr_reset_ops = { 181 181 .assert = apple_pmgr_reset_assert, 182 182 .deassert = apple_pmgr_reset_deassert, 183 183 .reset = apple_pmgr_reset_reset,
+25 -18
drivers/pmdomain/bcm/raspberrypi-power.c
··· 41 41 */ 42 42 struct rpi_power_domain_packet { 43 43 u32 domain; 44 - u32 on; 44 + u32 state; 45 45 }; 46 46 47 47 /* 48 48 * Asks the firmware to enable or disable power on a specific power 49 49 * domain. 50 50 */ 51 - static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on) 51 + static int rpi_firmware_set_power(struct generic_pm_domain *domain, bool on) 52 52 { 53 + struct rpi_power_domain *rpi_domain = 54 + container_of(domain, struct rpi_power_domain, base); 55 + bool old_interface = rpi_domain->old_interface; 53 56 struct rpi_power_domain_packet packet; 57 + int ret; 54 58 55 59 packet.domain = rpi_domain->domain; 56 - packet.on = on; 57 - return rpi_firmware_property(rpi_domain->fw, 58 - rpi_domain->old_interface ? 59 - RPI_FIRMWARE_SET_POWER_STATE : 60 - RPI_FIRMWARE_SET_DOMAIN_STATE, 61 - &packet, sizeof(packet)); 60 + packet.state = on; 61 + 62 + ret = rpi_firmware_property(rpi_domain->fw, old_interface ? 63 + RPI_FIRMWARE_SET_POWER_STATE : 64 + RPI_FIRMWARE_SET_DOMAIN_STATE, 65 + &packet, sizeof(packet)); 66 + if (ret) 67 + dev_err(&domain->dev, "Failed to set %s to %u (%d)\n", 68 + old_interface ? "power" : "domain", on, ret); 69 + else 70 + dev_dbg(&domain->dev, "Set %s to %u\n", 71 + old_interface ? 
"power" : "domain", on); 72 + 73 + return ret; 62 74 } 63 75 64 76 static int rpi_domain_off(struct generic_pm_domain *domain) 65 77 { 66 - struct rpi_power_domain *rpi_domain = 67 - container_of(domain, struct rpi_power_domain, base); 68 - 69 - return rpi_firmware_set_power(rpi_domain, false); 78 + return rpi_firmware_set_power(domain, false); 70 79 } 71 80 72 81 static int rpi_domain_on(struct generic_pm_domain *domain) 73 82 { 74 - struct rpi_power_domain *rpi_domain = 75 - container_of(domain, struct rpi_power_domain, base); 76 - 77 - return rpi_firmware_set_power(rpi_domain, true); 83 + return rpi_firmware_set_power(domain, true); 78 84 } 79 85 80 86 static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains, ··· 91 85 dom->fw = rpi_domains->fw; 92 86 93 87 dom->base.name = name; 88 + dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP; 94 89 dom->base.power_on = rpi_domain_on; 95 90 dom->base.power_off = rpi_domain_off; 96 91 ··· 149 142 int ret; 150 143 151 144 packet.domain = RPI_POWER_DOMAIN_ARM; 152 - packet.on = ~0; 145 + packet.state = ~0; 153 146 154 147 ret = rpi_firmware_property(rpi_domains->fw, 155 148 RPI_FIRMWARE_GET_DOMAIN_STATE, 156 149 &packet, sizeof(packet)); 157 150 158 - return ret == 0 && packet.on != ~0; 151 + return ret == 0 && packet.state != ~0; 159 152 } 160 153 161 154 static int rpi_power_probe(struct platform_device *pdev)
+67 -37
drivers/pmdomain/core.c
··· 117 117 .unlock = genpd_unlock_spin, 118 118 }; 119 119 120 + static void genpd_lock_raw_spin(struct generic_pm_domain *genpd) 121 + __acquires(&genpd->raw_slock) 122 + { 123 + unsigned long flags; 124 + 125 + raw_spin_lock_irqsave(&genpd->raw_slock, flags); 126 + genpd->raw_lock_flags = flags; 127 + } 128 + 129 + static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd, 130 + int depth) 131 + __acquires(&genpd->raw_slock) 132 + { 133 + unsigned long flags; 134 + 135 + raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth); 136 + genpd->raw_lock_flags = flags; 137 + } 138 + 139 + static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd) 140 + __acquires(&genpd->raw_slock) 141 + { 142 + unsigned long flags; 143 + 144 + raw_spin_lock_irqsave(&genpd->raw_slock, flags); 145 + genpd->raw_lock_flags = flags; 146 + return 0; 147 + } 148 + 149 + static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd) 150 + __releases(&genpd->raw_slock) 151 + { 152 + raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags); 153 + } 154 + 155 + static const struct genpd_lock_ops genpd_raw_spin_ops = { 156 + .lock = genpd_lock_raw_spin, 157 + .lock_nested = genpd_lock_nested_raw_spin, 158 + .lock_interruptible = genpd_lock_interruptible_raw_spin, 159 + .unlock = genpd_unlock_raw_spin, 160 + }; 161 + 120 162 #define genpd_lock(p) p->lock_ops->lock(p) 121 163 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) 122 164 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) ··· 1800 1758 genpd_lock(genpd); 1801 1759 1802 1760 genpd_set_cpumask(genpd, gpd_data->cpu); 1803 - dev_pm_domain_set(dev, &genpd->domain); 1804 1761 1805 1762 genpd->device_count++; 1806 1763 if (gd) ··· 1808 1767 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1809 1768 1810 1769 genpd_unlock(genpd); 1770 + dev_pm_domain_set(dev, &genpd->domain); 1811 1771 out: 1812 1772 if (ret) 1813 1773 genpd_free_dev_data(dev, 
gpd_data); ··· 1865 1823 genpd->gd->max_off_time_changed = true; 1866 1824 1867 1825 genpd_clear_cpumask(genpd, gpd_data->cpu); 1868 - dev_pm_domain_set(dev, NULL); 1869 1826 1870 1827 list_del_init(&pdd->list_node); 1871 1828 1872 1829 genpd_unlock(genpd); 1830 + 1831 + dev_pm_domain_set(dev, NULL); 1873 1832 1874 1833 if (genpd->detach_dev) 1875 1834 genpd->detach_dev(genpd, dev); ··· 2186 2143 2187 2144 static void genpd_lock_init(struct generic_pm_domain *genpd) 2188 2145 { 2189 - if (genpd_is_irq_safe(genpd)) { 2146 + if (genpd_is_cpu_domain(genpd)) { 2147 + raw_spin_lock_init(&genpd->raw_slock); 2148 + genpd->lock_ops = &genpd_raw_spin_ops; 2149 + } else if (genpd_is_irq_safe(genpd)) { 2190 2150 spin_lock_init(&genpd->slock); 2191 2151 genpd->lock_ops = &genpd_spin_ops; 2192 2152 } else { ··· 3227 3181 else 3228 3182 WARN_ON(1); 3229 3183 3230 - seq_printf(s, "%-25s ", p); 3184 + seq_printf(s, "%-26s ", p); 3185 + } 3186 + 3187 + static void perf_status_str(struct seq_file *s, struct device *dev) 3188 + { 3189 + struct generic_pm_domain_data *gpd_data; 3190 + 3191 + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3192 + 3193 + seq_printf(s, "%-10u ", gpd_data->performance_state); 3231 3194 } 3232 3195 3233 3196 static void mode_status_str(struct seq_file *s, struct device *dev) ··· 3245 3190 3246 3191 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3247 3192 3248 - seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW"); 3249 - } 3250 - 3251 - static void perf_status_str(struct seq_file *s, struct device *dev) 3252 - { 3253 - struct generic_pm_domain_data *gpd_data; 3254 - 3255 - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3256 - seq_put_decimal_ull(s, "", gpd_data->performance_state); 3193 + seq_printf(s, "%2s", gpd_data->hw_mode ? 
"HW" : "SW"); 3257 3194 } 3258 3195 3259 3196 static int genpd_summary_one(struct seq_file *s, ··· 3256 3209 [GENPD_STATE_OFF] = "off" 3257 3210 }; 3258 3211 struct pm_domain_data *pm_data; 3259 - const char *kobj_path; 3260 3212 struct gpd_link *link; 3261 3213 char state[16]; 3262 3214 int ret; ··· 3272 3226 else 3273 3227 snprintf(state, sizeof(state), "%s", 3274 3228 status_lookup[genpd->status]); 3275 - seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); 3229 + seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state); 3276 3230 3277 3231 /* 3278 3232 * Modifications on the list require holding locks on both ··· 3288 3242 } 3289 3243 3290 3244 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3291 - kobj_path = kobject_get_path(&pm_data->dev->kobj, 3292 - genpd_is_irq_safe(genpd) ? 3293 - GFP_ATOMIC : GFP_KERNEL); 3294 - if (kobj_path == NULL) 3295 - continue; 3296 - 3297 - seq_printf(s, "\n %-50s ", kobj_path); 3245 + seq_printf(s, "\n %-30s ", dev_name(pm_data->dev)); 3298 3246 rtpm_status_str(s, pm_data->dev); 3299 3247 perf_status_str(s, pm_data->dev); 3300 3248 mode_status_str(s, pm_data->dev); 3301 - kfree(kobj_path); 3302 3249 } 3303 3250 3304 3251 seq_puts(s, "\n"); ··· 3306 3267 struct generic_pm_domain *genpd; 3307 3268 int ret = 0; 3308 3269 3309 - seq_puts(s, "domain status children performance\n"); 3310 - seq_puts(s, " /device runtime status managed by\n"); 3311 - seq_puts(s, "------------------------------------------------------------------------------------------------------------\n"); 3270 + seq_puts(s, "domain status children performance\n"); 3271 + seq_puts(s, " /device runtime status managed by\n"); 3272 + seq_puts(s, "------------------------------------------------------------------------------\n"); 3312 3273 3313 3274 ret = mutex_lock_interruptible(&gpd_list_lock); 3314 3275 if (ret) ··· 3460 3421 { 3461 3422 struct generic_pm_domain *genpd = s->private; 3462 3423 struct 
pm_domain_data *pm_data; 3463 - const char *kobj_path; 3464 3424 int ret = 0; 3465 3425 3466 3426 ret = genpd_lock_interruptible(genpd); 3467 3427 if (ret) 3468 3428 return -ERESTARTSYS; 3469 3429 3470 - list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3471 - kobj_path = kobject_get_path(&pm_data->dev->kobj, 3472 - genpd_is_irq_safe(genpd) ? 3473 - GFP_ATOMIC : GFP_KERNEL); 3474 - if (kobj_path == NULL) 3475 - continue; 3476 - 3477 - seq_printf(s, "%s\n", kobj_path); 3478 - kfree(kobj_path); 3479 - } 3430 + list_for_each_entry(pm_data, &genpd->dev_list, list_node) 3431 + seq_printf(s, "%s\n", dev_name(pm_data->dev)); 3480 3432 3481 3433 genpd_unlock(genpd); 3482 3434 return ret;
+4 -10
drivers/pmdomain/imx/gpc.c
··· 455 455 } else { 456 456 struct imx_pm_domain *domain; 457 457 struct platform_device *pd_pdev; 458 - struct device_node *np; 459 458 struct clk *ipg_clk; 460 459 unsigned int ipg_rate_mhz; 461 460 int domain_index; ··· 464 465 return PTR_ERR(ipg_clk); 465 466 ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000; 466 467 467 - for_each_child_of_node(pgc_node, np) { 468 + for_each_child_of_node_scoped(pgc_node, np) { 468 469 ret = of_property_read_u32(np, "reg", &domain_index); 469 - if (ret) { 470 - of_node_put(np); 470 + if (ret) 471 471 return ret; 472 - } 472 + 473 473 if (domain_index >= of_id_data->num_domains) 474 474 continue; 475 475 476 476 pd_pdev = platform_device_alloc("imx-pgc-power-domain", 477 477 domain_index); 478 - if (!pd_pdev) { 479 - of_node_put(np); 478 + if (!pd_pdev) 480 479 return -ENOMEM; 481 - } 482 480 483 481 ret = platform_device_add_data(pd_pdev, 484 482 &imx_gpc_domains[domain_index], 485 483 sizeof(imx_gpc_domains[domain_index])); 486 484 if (ret) { 487 485 platform_device_put(pd_pdev); 488 - of_node_put(np); 489 486 return ret; 490 487 } 491 488 domain = pd_pdev->dev.platform_data; ··· 495 500 ret = platform_device_add(pd_pdev); 496 501 if (ret) { 497 502 platform_device_put(pd_pdev); 498 - of_node_put(np); 499 503 return ret; 500 504 } 501 505 }
+2 -6
drivers/pmdomain/imx/gpcv2.c
··· 1458 1458 .max_register = SZ_4K, 1459 1459 }; 1460 1460 struct device *dev = &pdev->dev; 1461 - struct device_node *pgc_np, *np; 1461 + struct device_node *pgc_np; 1462 1462 struct regmap *regmap; 1463 1463 void __iomem *base; 1464 1464 int ret; ··· 1480 1480 return ret; 1481 1481 } 1482 1482 1483 - for_each_child_of_node(pgc_np, np) { 1483 + for_each_child_of_node_scoped(pgc_np, np) { 1484 1484 struct platform_device *pd_pdev; 1485 1485 struct imx_pgc_domain *domain; 1486 1486 u32 domain_index; ··· 1491 1491 ret = of_property_read_u32(np, "reg", &domain_index); 1492 1492 if (ret) { 1493 1493 dev_err(dev, "Failed to read 'reg' property\n"); 1494 - of_node_put(np); 1495 1494 return ret; 1496 1495 } 1497 1496 ··· 1505 1506 domain_index); 1506 1507 if (!pd_pdev) { 1507 1508 dev_err(dev, "Failed to allocate platform device\n"); 1508 - of_node_put(np); 1509 1509 return -ENOMEM; 1510 1510 } 1511 1511 ··· 1513 1515 sizeof(domain_data->domains[domain_index])); 1514 1516 if (ret) { 1515 1517 platform_device_put(pd_pdev); 1516 - of_node_put(np); 1517 1518 return ret; 1518 1519 } 1519 1520 ··· 1529 1532 ret = platform_device_add(pd_pdev); 1530 1533 if (ret) { 1531 1534 platform_device_put(pd_pdev); 1532 - of_node_put(np); 1533 1535 return ret; 1534 1536 } 1535 1537 }
+9 -13
drivers/pmdomain/imx/imx93-pd.c
··· 28 28 void __iomem *addr; 29 29 struct clk_bulk_data *clks; 30 30 int num_clks; 31 - bool init_off; 32 31 }; 33 32 34 33 #define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd) ··· 89 90 struct device *dev = &pdev->dev; 90 91 struct device_node *np = dev->of_node; 91 92 92 - if (!domain->init_off) 93 - clk_bulk_disable_unprepare(domain->num_clks, domain->clks); 94 - 95 93 of_genpd_del_provider(np); 96 94 pm_genpd_remove(&domain->genpd); 97 95 } ··· 98 102 struct device *dev = &pdev->dev; 99 103 struct device_node *np = dev->of_node; 100 104 struct imx93_power_domain *domain; 105 + bool init_off; 101 106 int ret; 102 107 103 108 domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL); ··· 118 121 domain->genpd.power_on = imx93_pd_on; 119 122 domain->dev = dev; 120 123 121 - domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK; 124 + init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK; 122 125 /* Just to sync the status of hardware */ 123 - if (!domain->init_off) { 126 + if (!init_off) { 124 127 ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks); 125 - if (ret) { 126 - dev_err(domain->dev, "failed to enable clocks for domain: %s\n", 127 - domain->genpd.name); 128 - return ret; 129 - } 128 + if (ret) 129 + return dev_err_probe(domain->dev, ret, 130 + "failed to enable clocks for domain: %s\n", 131 + domain->genpd.name); 130 132 } 131 133 132 - ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off); 134 + ret = pm_genpd_init(&domain->genpd, NULL, init_off); 133 135 if (ret) 134 136 goto err_clk_unprepare; 135 137 ··· 144 148 pm_genpd_remove(&domain->genpd); 145 149 146 150 err_clk_unprepare: 147 - if (!domain->init_off) 151 + if (!init_off) 148 152 clk_bulk_disable_unprepare(domain->num_clks, domain->clks); 149 153 150 154 return ret;
+2 -4
drivers/pmdomain/mediatek/mtk-pm-domains.c
··· 398 398 scpsys->dev->of_node = node; 399 399 pd->supply = devm_regulator_get(scpsys->dev, "domain"); 400 400 scpsys->dev->of_node = root_node; 401 - if (IS_ERR(pd->supply)) { 402 - dev_err_probe(scpsys->dev, PTR_ERR(pd->supply), 401 + if (IS_ERR(pd->supply)) 402 + return dev_err_cast_probe(scpsys->dev, pd->supply, 403 403 "%pOF: failed to get power supply.\n", 404 404 node); 405 - return ERR_CAST(pd->supply); 406 - } 407 405 } 408 406 409 407 pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
+37 -55
drivers/pmdomain/qcom/cpr.c
··· 4 4 * Copyright (c) 2019, Linaro Limited 5 5 */ 6 6 7 + #include <linux/cleanup.h> 7 8 #include <linux/module.h> 8 9 #include <linux/err.h> 9 10 #include <linux/debugfs.h> ··· 748 747 struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); 749 748 struct corner *corner, *end; 750 749 enum voltage_change_dir dir; 751 - int ret = 0, new_uV; 750 + int ret, new_uV; 752 751 753 - mutex_lock(&drv->lock); 752 + guard(mutex)(&drv->lock); 754 753 755 754 dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", 756 755 __func__, state, cpr_get_cur_perf_state(drv)); ··· 761 760 */ 762 761 corner = drv->corners + state - 1; 763 762 end = &drv->corners[drv->num_corners - 1]; 764 - if (corner > end || corner < drv->corners) { 765 - ret = -EINVAL; 766 - goto unlock; 767 - } 763 + if (corner > end || corner < drv->corners) 764 + return -EINVAL; 768 765 769 766 /* Determine direction */ 770 767 if (drv->corner > corner) ··· 782 783 783 784 ret = cpr_scale_voltage(drv, corner, new_uV, dir); 784 785 if (ret) 785 - goto unlock; 786 + return ret; 786 787 787 788 if (cpr_is_allowed(drv)) { 788 789 cpr_irq_clr(drv); ··· 793 794 794 795 drv->corner = corner; 795 796 796 - unlock: 797 - mutex_unlock(&drv->lock); 798 - 799 - return ret; 797 + return 0; 800 798 } 801 799 802 800 static int ··· 1036 1040 static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, 1037 1041 struct device *cpu_dev) 1038 1042 { 1039 - u64 rate = 0; 1040 - struct device_node *ref_np; 1041 - struct device_node *desc_np; 1042 - struct device_node *child_np = NULL; 1043 - struct device_node *child_req_np = NULL; 1043 + struct device_node *ref_np __free(device_node) = NULL; 1044 + struct device_node *desc_np __free(device_node) = 1045 + dev_pm_opp_of_get_opp_desc_node(cpu_dev); 1044 1046 1045 - desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); 1046 1047 if (!desc_np) 1047 1048 return 0; 1048 1049 1049 1050 ref_np = dev_pm_opp_get_of_node(ref); 1050 1051 if (!ref_np) 1051 - goto 
out_ref; 1052 + return 0; 1052 1053 1053 - do { 1054 - of_node_put(child_req_np); 1055 - child_np = of_get_next_available_child(desc_np, child_np); 1056 - child_req_np = of_parse_phandle(child_np, "required-opps", 0); 1057 - } while (child_np && child_req_np != ref_np); 1054 + for_each_available_child_of_node_scoped(desc_np, child_np) { 1055 + struct device_node *child_req_np __free(device_node) = 1056 + of_parse_phandle(child_np, "required-opps", 0); 1058 1057 1059 - if (child_np && child_req_np == ref_np) 1060 - of_property_read_u64(child_np, "opp-hz", &rate); 1058 + if (child_req_np == ref_np) { 1059 + u64 rate; 1061 1060 1062 - of_node_put(child_req_np); 1063 - of_node_put(child_np); 1064 - of_node_put(ref_np); 1065 - out_ref: 1066 - of_node_put(desc_np); 1061 + of_property_read_u64(child_np, "opp-hz", &rate); 1062 + return (unsigned long) rate; 1063 + } 1064 + } 1067 1065 1068 - return (unsigned long) rate; 1066 + return 0; 1069 1067 } 1070 1068 1071 1069 static int cpr_corner_init(struct cpr_drv *drv) ··· 1433 1443 { 1434 1444 struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); 1435 1445 const struct acc_desc *acc_desc = drv->acc_desc; 1436 - int ret = 0; 1446 + int ret; 1437 1447 1438 - mutex_lock(&drv->lock); 1448 + guard(mutex)(&drv->lock); 1439 1449 1440 1450 dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev)); 1441 1451 ··· 1447 1457 * additional initialization when further CPUs get attached. 1448 1458 */ 1449 1459 if (drv->attached_cpu_dev) 1450 - goto unlock; 1460 + return 0; 1451 1461 1452 1462 /* 1453 1463 * cpr_scale_voltage() requires the direction (if we are changing ··· 1459 1469 * the first time cpr_set_performance_state() is called. 
1460 1470 */ 1461 1471 drv->cpu_clk = devm_clk_get(dev, NULL); 1462 - if (IS_ERR(drv->cpu_clk)) { 1463 - ret = PTR_ERR(drv->cpu_clk); 1464 - if (ret != -EPROBE_DEFER) 1465 - dev_err(drv->dev, "could not get cpu clk: %d\n", ret); 1466 - goto unlock; 1467 - } 1472 + if (IS_ERR(drv->cpu_clk)) 1473 + return dev_err_probe(drv->dev, PTR_ERR(drv->cpu_clk), 1474 + "could not get cpu clk\n"); 1475 + 1468 1476 drv->attached_cpu_dev = dev; 1469 1477 1470 1478 dev_dbg(drv->dev, "using cpu clk from: %s\n", ··· 1479 1491 ret = dev_pm_opp_get_opp_count(&drv->pd.dev); 1480 1492 if (ret < 0) { 1481 1493 dev_err(drv->dev, "could not get OPP count\n"); 1482 - goto unlock; 1494 + return ret; 1483 1495 } 1484 1496 drv->num_corners = ret; 1485 1497 1486 1498 if (drv->num_corners < 2) { 1487 1499 dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); 1488 - ret = -EINVAL; 1489 - goto unlock; 1500 + return -EINVAL; 1490 1501 } 1491 1502 1492 1503 drv->corners = devm_kcalloc(drv->dev, drv->num_corners, 1493 1504 sizeof(*drv->corners), 1494 1505 GFP_KERNEL); 1495 - if (!drv->corners) { 1496 - ret = -ENOMEM; 1497 - goto unlock; 1498 - } 1506 + if (!drv->corners) 1507 + return -ENOMEM; 1499 1508 1500 1509 ret = cpr_corner_init(drv); 1501 1510 if (ret) 1502 - goto unlock; 1511 + return ret; 1503 1512 1504 1513 cpr_set_loop_allowed(drv); 1505 1514 1506 1515 ret = cpr_init_parameters(drv); 1507 1516 if (ret) 1508 - goto unlock; 1517 + return ret; 1509 1518 1510 1519 /* Configure CPR HW but keep it disabled */ 1511 1520 ret = cpr_config(drv); 1512 1521 if (ret) 1513 - goto unlock; 1522 + return ret; 1514 1523 1515 1524 ret = cpr_find_initial_corner(drv); 1516 1525 if (ret) 1517 - goto unlock; 1526 + return ret; 1518 1527 1519 1528 if (acc_desc->config) 1520 1529 regmap_multi_reg_write(drv->tcsr, acc_desc->config, ··· 1526 1541 dev_info(drv->dev, "driver initialized with %u OPPs\n", 1527 1542 drv->num_corners); 1528 1543 1529 - unlock: 1530 - mutex_unlock(&drv->lock); 1531 - 1532 - return ret; 
1544 + return 0; 1533 1545 } 1534 1546 1535 1547 static int cpr_debug_info_show(struct seq_file *s, void *unused)
+5 -6
drivers/pmdomain/qcom/rpmhpd.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/ 3 3 4 + #include <linux/cleanup.h> 4 5 #include <linux/err.h> 5 6 #include <linux/init.h> 6 7 #include <linux/kernel.h> ··· 776 775 unsigned int level) 777 776 { 778 777 struct rpmhpd *pd = domain_to_rpmhpd(domain); 779 - int ret = 0, i; 778 + int ret, i; 780 779 781 - mutex_lock(&rpmhpd_lock); 780 + guard(mutex)(&rpmhpd_lock); 782 781 783 782 for (i = 0; i < pd->level_count; i++) 784 783 if (level <= pd->level[i]) ··· 798 797 799 798 ret = rpmhpd_aggregate_corner(pd, i); 800 799 if (ret) 801 - goto out; 800 + return ret; 802 801 } 803 802 804 803 pd->corner = i; 805 - out: 806 - mutex_unlock(&rpmhpd_lock); 807 804 808 - return ret; 805 + return 0; 809 806 } 810 807 811 808 static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+6 -14
drivers/pmdomain/qcom/rpmpd.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */ 3 3 4 + #include <linux/cleanup.h> 4 5 #include <linux/err.h> 5 6 #include <linux/init.h> 6 7 #include <linux/kernel.h> ··· 1025 1024 int ret; 1026 1025 struct rpmpd *pd = domain_to_rpmpd(domain); 1027 1026 1028 - mutex_lock(&rpmpd_lock); 1027 + guard(mutex)(&rpmpd_lock); 1029 1028 1030 1029 ret = rpmpd_send_enable(pd, true); 1031 1030 if (ret) 1032 - goto out; 1031 + return ret; 1033 1032 1034 1033 pd->enabled = true; 1035 1034 1036 1035 if (pd->corner) 1037 1036 ret = rpmpd_aggregate_corner(pd); 1038 - 1039 - out: 1040 - mutex_unlock(&rpmpd_lock); 1041 1037 1042 1038 return ret; 1043 1039 } ··· 1058 1060 static int rpmpd_set_performance(struct generic_pm_domain *domain, 1059 1061 unsigned int state) 1060 1062 { 1061 - int ret = 0; 1062 1063 struct rpmpd *pd = domain_to_rpmpd(domain); 1063 1064 1064 1065 if (state > pd->max_state) 1065 1066 state = pd->max_state; 1066 1067 1067 - mutex_lock(&rpmpd_lock); 1068 + guard(mutex)(&rpmpd_lock); 1068 1069 1069 1070 pd->corner = state; 1070 1071 1071 1072 /* Always send updates for vfc and vfl */ 1072 1073 if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) && 1073 1074 pd->key != cpu_to_le32(KEY_FLOOR_LEVEL)) 1074 - goto out; 1075 + return 0; 1075 1076 1076 - ret = rpmpd_aggregate_corner(pd); 1077 - 1078 - out: 1079 - mutex_unlock(&rpmpd_lock); 1080 - 1081 - return ret; 1077 + return rpmpd_aggregate_corner(pd); 1082 1078 } 1083 1079 1084 1080 static int rpmpd_probe(struct platform_device *pdev)
+98 -20
drivers/pmdomain/rockchip/pm-domains.c
··· 33 33 #include <dt-bindings/power/rk3368-power.h> 34 34 #include <dt-bindings/power/rk3399-power.h> 35 35 #include <dt-bindings/power/rk3568-power.h> 36 + #include <dt-bindings/power/rockchip,rk3576-power.h> 36 37 #include <dt-bindings/power/rk3588-power.h> 37 38 38 39 struct rockchip_domain_info { ··· 46 45 bool active_wakeup; 47 46 int pwr_w_mask; 48 47 int req_w_mask; 48 + int clk_ungate_mask; 49 49 int mem_status_mask; 50 50 int repair_status_mask; 51 51 u32 pwr_offset; ··· 64 62 u32 chain_status_offset; 65 63 u32 mem_status_offset; 66 64 u32 repair_status_offset; 65 + u32 clk_ungate_offset; 67 66 68 67 u32 core_pwrcnt_offset; 69 68 u32 gpu_pwrcnt_offset; ··· 147 144 .active_wakeup = wakeup, \ 148 145 } 149 146 147 + #define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \ 148 + { \ 149 + .name = _name, \ 150 + .pwr_offset = p_offset, \ 151 + .pwr_w_mask = (pwr) << 16, \ 152 + .pwr_mask = (pwr), \ 153 + .status_mask = (status), \ 154 + .mem_offset = m_offset, \ 155 + .mem_status_mask = (m_status), \ 156 + .repair_status_mask = (r_status), \ 157 + .req_offset = r_offset, \ 158 + .req_w_mask = (req) << 16, \ 159 + .req_mask = (req), \ 160 + .idle_mask = (idle), \ 161 + .clk_ungate_mask = (g_mask), \ 162 + .ack_mask = (ack), \ 163 + .active_wakeup = wakeup, \ 164 + } 165 + 150 166 #define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \ 151 167 { \ 152 168 .name = _name, \ ··· 196 174 197 175 #define DOMAIN_RK3568(name, pwr, req, wakeup) \ 198 176 DOMAIN_M(name, pwr, pwr, req, req, req, wakeup) 177 + 178 + #define DOMAIN_RK3576(name, p_offset, pwr, status, r_status, r_offset, req, idle, g_mask, wakeup) \ 179 + DOMAIN_M_O_R_G(name, p_offset, pwr, status, 0, r_status, r_status, r_offset, req, idle, idle, g_mask, wakeup) 199 180 200 181 /* 201 182 * Dynamic Memory Controller may need to coordinate with us -- see ··· 322 297 323 298 regmap_read(pmu->regmap, pmu->info->ack_offset, &val); 324 299 
return val; 300 + } 301 + 302 + static int rockchip_pmu_ungate_clk(struct rockchip_pm_domain *pd, bool ungate) 303 + { 304 + const struct rockchip_domain_info *pd_info = pd->info; 305 + struct rockchip_pmu *pmu = pd->pmu; 306 + unsigned int val; 307 + int clk_ungate_w_mask = pd_info->clk_ungate_mask << 16; 308 + 309 + if (!pd_info->clk_ungate_mask) 310 + return 0; 311 + 312 + if (!pmu->info->clk_ungate_offset) 313 + return 0; 314 + 315 + val = ungate ? (pd_info->clk_ungate_mask | clk_ungate_w_mask) : 316 + clk_ungate_w_mask; 317 + regmap_write(pmu->regmap, pmu->info->clk_ungate_offset, val); 318 + 319 + return 0; 325 320 } 326 321 327 322 static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, ··· 584 539 return ret; 585 540 } 586 541 542 + rockchip_pmu_ungate_clk(pd, true); 543 + 587 544 if (!power_on) { 588 545 rockchip_pmu_save_qos(pd); 589 546 ··· 602 555 rockchip_pmu_restore_qos(pd); 603 556 } 604 557 558 + rockchip_pmu_ungate_clk(pd, false); 605 559 clk_bulk_disable(pd->num_clks, pd->clks); 606 560 } 607 561 ··· 760 712 goto err_unprepare_clocks; 761 713 } 762 714 pd->qos_regmap[j] = syscon_node_to_regmap(qos_node); 715 + of_node_put(qos_node); 763 716 if (IS_ERR(pd->qos_regmap[j])) { 764 717 error = -ENODEV; 765 - of_node_put(qos_node); 766 718 goto err_unprepare_clocks; 767 719 } 768 - of_node_put(qos_node); 769 720 } 770 721 } 771 722 ··· 847 800 static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, 848 801 struct device_node *parent) 849 802 { 850 - struct device_node *np; 851 803 struct generic_pm_domain *child_domain, *parent_domain; 852 804 int error; 853 805 854 - for_each_child_of_node(parent, np) { 806 + for_each_child_of_node_scoped(parent, np) { 855 807 u32 idx; 856 808 857 809 error = of_property_read_u32(parent, "reg", &idx); ··· 858 812 dev_err(pmu->dev, 859 813 "%pOFn: failed to retrieve domain id (reg): %d\n", 860 814 parent, error); 861 - goto err_out; 815 + return error; 862 816 } 863 817 parent_domain = 
pmu->genpd_data.domains[idx]; 864 818 ··· 866 820 if (error) { 867 821 dev_err(pmu->dev, "failed to handle node %pOFn: %d\n", 868 822 np, error); 869 - goto err_out; 823 + return error; 870 824 } 871 825 872 826 error = of_property_read_u32(np, "reg", &idx); ··· 874 828 dev_err(pmu->dev, 875 829 "%pOFn: failed to retrieve domain id (reg): %d\n", 876 830 np, error); 877 - goto err_out; 831 + return error; 878 832 } 879 833 child_domain = pmu->genpd_data.domains[idx]; 880 834 ··· 882 836 if (error) { 883 837 dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n", 884 838 parent_domain->name, child_domain->name, error); 885 - goto err_out; 839 + return error; 886 840 } else { 887 841 dev_dbg(pmu->dev, "%s add subdomain: %s\n", 888 842 parent_domain->name, child_domain->name); ··· 892 846 } 893 847 894 848 return 0; 895 - 896 - err_out: 897 - of_node_put(np); 898 - return error; 899 849 } 900 850 901 851 static int rockchip_pm_domain_probe(struct platform_device *pdev) 902 852 { 903 853 struct device *dev = &pdev->dev; 904 854 struct device_node *np = dev->of_node; 905 - struct device_node *node; 906 855 struct device *parent; 907 856 struct rockchip_pmu *pmu; 908 857 const struct rockchip_pmu_info *pmu_info; ··· 953 912 * Prevent any rockchip_pmu_block() from racing with the remainder of 954 913 * setup (clocks, register initialization). 
955 914 */ 956 - mutex_lock(&dmc_pmu_mutex); 915 + guard(mutex)(&dmc_pmu_mutex); 957 916 958 - for_each_available_child_of_node(np, node) { 917 + for_each_available_child_of_node_scoped(np, node) { 959 918 error = rockchip_pm_add_one_domain(pmu, node); 960 919 if (error) { 961 920 dev_err(dev, "failed to handle node %pOFn: %d\n", 962 921 node, error); 963 - of_node_put(node); 964 922 goto err_out; 965 923 } 966 924 ··· 967 927 if (error < 0) { 968 928 dev_err(dev, "failed to handle subdomain node %pOFn: %d\n", 969 929 node, error); 970 - of_node_put(node); 971 930 goto err_out; 972 931 } 973 932 } ··· 986 947 if (!WARN_ON_ONCE(dmc_pmu)) 987 948 dmc_pmu = pmu; 988 949 989 - mutex_unlock(&dmc_pmu_mutex); 990 - 991 950 return 0; 992 951 993 952 err_out: 994 953 rockchip_pm_domain_cleanup(pmu); 995 - mutex_unlock(&dmc_pmu_mutex); 996 954 return error; 997 955 } 998 956 ··· 1140 1104 [RK3568_PD_RKVDEC] = DOMAIN_RK3568("vdec", BIT(4), BIT(8), false), 1141 1105 [RK3568_PD_RKVENC] = DOMAIN_RK3568("venc", BIT(3), BIT(7), false), 1142 1106 [RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false), 1107 + }; 1108 + 1109 + static const struct rockchip_domain_info rk3576_pm_domains[] = { 1110 + [RK3576_PD_NPU] = DOMAIN_RK3576("npu", 0x0, BIT(0), BIT(0), 0, 0x0, 0, 0, 0, false), 1111 + [RK3576_PD_NVM] = DOMAIN_RK3576("nvm", 0x0, BIT(6), 0, BIT(6), 0x4, BIT(2), BIT(18), BIT(2), false), 1112 + [RK3576_PD_SDGMAC] = DOMAIN_RK3576("sdgmac", 0x0, BIT(7), 0, BIT(7), 0x4, BIT(1), BIT(17), 0x6, false), 1113 + [RK3576_PD_AUDIO] = DOMAIN_RK3576("audio", 0x0, BIT(8), 0, BIT(8), 0x4, BIT(0), BIT(16), BIT(0), false), 1114 + [RK3576_PD_PHP] = DOMAIN_RK3576("php", 0x0, BIT(9), 0, BIT(9), 0x0, BIT(15), BIT(15), BIT(15), false), 1115 + [RK3576_PD_SUBPHP] = DOMAIN_RK3576("subphp", 0x0, BIT(10), 0, BIT(10), 0x0, 0, 0, 0, false), 1116 + [RK3576_PD_VOP] = DOMAIN_RK3576("vop", 0x0, BIT(11), 0, BIT(11), 0x0, 0x6000, 0x6000, 0x6000, false), 1117 + [RK3576_PD_VO1] = DOMAIN_RK3576("vo1", 0x0, 
BIT(14), 0, BIT(14), 0x0, BIT(12), BIT(12), 0x7000, false), 1118 + [RK3576_PD_VO0] = DOMAIN_RK3576("vo0", 0x0, BIT(15), 0, BIT(15), 0x0, BIT(11), BIT(11), 0x6800, false), 1119 + [RK3576_PD_USB] = DOMAIN_RK3576("usb", 0x4, BIT(0), 0, BIT(16), 0x0, BIT(10), BIT(10), 0x6400, true), 1120 + [RK3576_PD_VI] = DOMAIN_RK3576("vi", 0x4, BIT(1), 0, BIT(17), 0x0, BIT(9), BIT(9), BIT(9), false), 1121 + [RK3576_PD_VEPU0] = DOMAIN_RK3576("vepu0", 0x4, BIT(2), 0, BIT(18), 0x0, BIT(7), BIT(7), 0x280, false), 1122 + [RK3576_PD_VEPU1] = DOMAIN_RK3576("vepu1", 0x4, BIT(3), 0, BIT(19), 0x0, BIT(8), BIT(8), BIT(8), false), 1123 + [RK3576_PD_VDEC] = DOMAIN_RK3576("vdec", 0x4, BIT(4), 0, BIT(20), 0x0, BIT(6), BIT(6), BIT(6), false), 1124 + [RK3576_PD_VPU] = DOMAIN_RK3576("vpu", 0x4, BIT(5), 0, BIT(21), 0x0, BIT(5), BIT(5), BIT(5), false), 1125 + [RK3576_PD_NPUTOP] = DOMAIN_RK3576("nputop", 0x4, BIT(6), 0, BIT(22), 0x0, 0x18, 0x18, 0x18, false), 1126 + [RK3576_PD_NPU0] = DOMAIN_RK3576("npu0", 0x4, BIT(7), 0, BIT(23), 0x0, BIT(1), BIT(1), 0x1a, false), 1127 + [RK3576_PD_NPU1] = DOMAIN_RK3576("npu1", 0x4, BIT(8), 0, BIT(24), 0x0, BIT(2), BIT(2), 0x1c, false), 1128 + [RK3576_PD_GPU] = DOMAIN_RK3576("gpu", 0x4, BIT(9), 0, BIT(25), 0x0, BIT(0), BIT(0), BIT(0), false), 1143 1129 }; 1144 1130 1145 1131 static const struct rockchip_domain_info rk3588_pm_domains[] = { ··· 1342 1284 .domain_info = rk3568_pm_domains, 1343 1285 }; 1344 1286 1287 + static const struct rockchip_pmu_info rk3576_pmu = { 1288 + .pwr_offset = 0x210, 1289 + .status_offset = 0x230, 1290 + .chain_status_offset = 0x248, 1291 + .mem_status_offset = 0x250, 1292 + .mem_pwr_offset = 0x300, 1293 + .req_offset = 0x110, 1294 + .idle_offset = 0x128, 1295 + .ack_offset = 0x120, 1296 + .repair_status_offset = 0x570, 1297 + .clk_ungate_offset = 0x140, 1298 + 1299 + .num_domains = ARRAY_SIZE(rk3576_pm_domains), 1300 + .domain_info = rk3576_pm_domains, 1301 + }; 1302 + 1345 1303 static const struct rockchip_pmu_info rk3588_pmu = { 1346 1304 
.pwr_offset = 0x14c, 1347 1305 .status_offset = 0x180, ··· 1432 1358 { 1433 1359 .compatible = "rockchip,rk3568-power-controller", 1434 1360 .data = (void *)&rk3568_pmu, 1361 + }, 1362 + { 1363 + .compatible = "rockchip,rk3576-power-controller", 1364 + .data = (void *)&rk3576_pmu, 1435 1365 }, 1436 1366 { 1437 1367 .compatible = "rockchip,rk3588-power-controller",
+30
include/dt-bindings/power/rockchip,rk3576-power.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 + #ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__ 3 + #define __DT_BINDINGS_POWER_RK3576_POWER_H__ 4 + 5 + /* VD_NPU */ 6 + #define RK3576_PD_NPU 0 7 + #define RK3576_PD_NPUTOP 1 8 + #define RK3576_PD_NPU0 2 9 + #define RK3576_PD_NPU1 3 10 + 11 + /* VD_GPU */ 12 + #define RK3576_PD_GPU 4 13 + 14 + /* VD_LOGIC */ 15 + #define RK3576_PD_NVM 5 16 + #define RK3576_PD_SDGMAC 6 17 + #define RK3576_PD_USB 7 18 + #define RK3576_PD_PHP 8 19 + #define RK3576_PD_SUBPHP 9 20 + #define RK3576_PD_AUDIO 10 21 + #define RK3576_PD_VEPU0 11 22 + #define RK3576_PD_VEPU1 12 23 + #define RK3576_PD_VPU 13 24 + #define RK3576_PD_VDEC 14 25 + #define RK3576_PD_VI 15 26 + #define RK3576_PD_VO0 16 27 + #define RK3576_PD_VO1 17 28 + #define RK3576_PD_VOP 18 29 + 30 + #endif
+15 -1
include/linux/pm_domain.h
··· 198 198 spinlock_t slock; 199 199 unsigned long lock_flags; 200 200 }; 201 + struct { 202 + raw_spinlock_t raw_slock; 203 + unsigned long raw_lock_flags; 204 + }; 201 205 }; 202 - 203 206 }; 204 207 205 208 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) ··· 476 473 int dev_pm_domain_attach_list(struct device *dev, 477 474 const struct dev_pm_domain_attach_data *data, 478 475 struct dev_pm_domain_list **list); 476 + int devm_pm_domain_attach_list(struct device *dev, 477 + const struct dev_pm_domain_attach_data *data, 478 + struct dev_pm_domain_list **list); 479 479 void dev_pm_domain_detach(struct device *dev, bool power_off); 480 480 void dev_pm_domain_detach_list(struct dev_pm_domain_list *list); 481 481 int dev_pm_domain_start(struct device *dev); ··· 505 499 { 506 500 return 0; 507 501 } 502 + 503 + static inline int devm_pm_domain_attach_list(struct device *dev, 504 + const struct dev_pm_domain_attach_data *data, 505 + struct dev_pm_domain_list **list) 506 + { 507 + return 0; 508 + } 509 + 508 510 static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} 509 511 static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {} 510 512 static inline int dev_pm_domain_start(struct device *dev)