Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge back earlier material related to system sleep

+227 -68
+12
Documentation/admin-guide/kernel-parameters.txt
···
         that number, otherwise (e.g., 'pmu_override=on'), MMCR1
         remains 0.

+        pm_async=       [PM]
+                        Format: off
+                        This parameter sets the initial value of the
+                        /sys/power/pm_async sysfs knob at boot time.
+                        If set to "off", disables asynchronous suspend and
+                        resume of devices during system-wide power transitions.
+                        This can be useful on platforms where device
+                        dependencies are not well-defined, or for debugging
+                        power management issues. Asynchronous operations are
+                        enabled by default.
+
+
         pm_debug_messages [SUSPEND,KNL]
                         Enable suspend/resume debug messages during boot up.
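Note that the boot parameter only seeds the knob's initial value; the setting can still be changed at run time by writing "0" or "1" to /sys/power/pm_async.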
+4 -4
MAINTAINERS
···
 FREEZER
 M:      "Rafael J. Wysocki" <rafael@kernel.org>
-M:      Pavel Machek <pavel@kernel.org>
+R:      Pavel Machek <pavel@kernel.org>
 L:      linux-pm@vger.kernel.org
 S:      Supported
 F:      Documentation/power/freezing-of-tasks.rst
···
 HIBERNATION (aka Software Suspend, aka swsusp)
 M:      "Rafael J. Wysocki" <rafael@kernel.org>
-M:      Pavel Machek <pavel@kernel.org>
+R:      Pavel Machek <pavel@kernel.org>
 L:      linux-pm@vger.kernel.org
 S:      Supported
 B:      https://bugzilla.kernel.org
···
 SUSPEND TO RAM
 M:      "Rafael J. Wysocki" <rafael@kernel.org>
-M:      Len Brown <len.brown@intel.com>
-M:      Pavel Machek <pavel@kernel.org>
+R:      Len Brown <lenb@kernel.org>
+R:      Pavel Machek <pavel@kernel.org>
 L:      linux-pm@vger.kernel.org
 S:      Supported
 B:      https://bugzilla.kernel.org
+4
drivers/acpi/device_pm.c
···
 {
        struct acpi_device *adev = ACPI_COMPANION(dev);

+       dev_pm_set_strict_midlayer(dev, true);
+
        if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
                int ret = dev->driver->pm->prepare(dev);

···
         */
        if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
                pm_request_resume(dev);
+
+       dev_pm_set_strict_midlayer(dev, false);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_complete);
+59 -17
drivers/base/power/main.c
···
        /*
         * Start processing "async" children of the device unless it's been
         * started already for them.
-        *
-        * This could have been done for the device's "async" consumers too, but
-        * they either need to wait for their parents or the processing has
-        * already started for them after their parents were processed.
         */
        device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+       struct device_link *link;
+       int idx;
+
+       dpm_async_resume_children(dev, func);
+
+       idx = device_links_read_lock();
+
+       /* Start processing the device's "async" consumers. */
+       list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+               if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                       dpm_async_with_cleanup(link->consumer, func);
+
+       device_links_read_unlock(idx);
 }

 static void dpm_clear_async_state(struct device *dev)
···
 static bool dpm_root_device(struct device *dev)
 {
-       return !dev->parent;
+       lockdep_assert_held(&dpm_list_mtx);
+
+       /*
+        * Since this function is required to run under dpm_list_mtx, the
+        * list_empty() below will only return true if the device's list of
+        * suppliers is actually empty before calling it.
+        */
+       return !dev->parent && list_empty(&dev->links.suppliers);
 }

 static void async_resume_noirq(void *data, async_cookie_t cookie);
···
                pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
        }

-       dpm_async_resume_children(dev, async_resume_noirq);
+       dpm_async_resume_subordinate(dev, async_resume_noirq);
 }

 static void async_resume_noirq(void *data, async_cookie_t cookie)
···
                pm_dev_err(dev, state, async ? " async early" : " early", error);
        }

-       dpm_async_resume_children(dev, async_resume_early);
+       dpm_async_resume_subordinate(dev, async_resume_early);
 }

 static void async_resume_early(void *data, async_cookie_t cookie)
···
                pm_dev_err(dev, state, async ? " async" : "", error);
        }

-       dpm_async_resume_children(dev, async_resume);
+       dpm_async_resume_subordinate(dev, async_resume);
 }

 static void async_resume(void *data, async_cookie_t cookie)
···
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
-       might_sleep();

        pm_transition = state;
        async_error = 0;
···
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
-       might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
···
                return false;
        }

-       return true;
+       /*
+        * Since this function is required to run under dpm_list_mtx, the
+        * list_empty() below will only return true if the device's list of
+        * consumers is actually empty before calling it.
+        */
+       return list_empty(&dev->links.consumers);
 }

-static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
 {
        guard(mutex)(&dpm_list_mtx);
···
         * deleted before it.
         */
        if (!device_pm_initialized(dev))
-               return;
+               return false;

        /* Start processing the device's parent if it is "async". */
        if (dev->parent)
                dpm_async_with_cleanup(dev->parent, func);
+
+       return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+       struct device_link *link;
+       int idx;
+
+       if (!dpm_async_suspend_parent(dev, func))
+               return;
+
+       idx = device_links_read_lock();
+
+       /* Start processing the device's "async" suppliers. */
+       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+               if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                       dpm_async_with_cleanup(link->supplier, func);
+
+       device_links_read_unlock(idx);
 }

 static void dpm_async_suspend_complete_all(struct list_head *device_list)
···
        if (error || async_error)
                return error;

-       dpm_async_suspend_parent(dev, async_suspend_noirq);
+       dpm_async_suspend_superior(dev, async_suspend_noirq);

        return 0;
 }
···
        if (error || async_error)
                return error;

-       dpm_async_suspend_parent(dev, async_suspend_late);
+       dpm_async_suspend_superior(dev, async_suspend_late);

        return 0;
 }
···
        if (error || async_error)
                return error;

-       dpm_async_suspend_parent(dev, async_suspend);
+       dpm_async_suspend_superior(dev, async_suspend);

        return 0;
 }
···
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
-       might_sleep();

        /*
         * Give a chance for the known devices to complete their probes, before
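For context, the supplier/consumer edges walked by dpm_async_resume_subordinate() and dpm_async_suspend_superior() are device links. A minimal sketch of how such an edge might be created from a consumer's probe path (foo_bind_supplier() and both device pointers are hypothetical; device_link_add() and the DL_FLAG_* constants are the stock driver-core API):

#include <linux/device.h>

/* Hypothetical helper: "con" consumes a resource provided by "sup". */
static int foo_bind_supplier(struct device *con, struct device *sup)
{
        struct device_link *link;

        /*
         * A managed link orders system-wide PM: the consumer is suspended
         * before its supplier and resumed after it, and the async paths
         * above now fan out along these edges as well.
         */
        link = device_link_add(con, sup,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
        return link ? 0 : -EINVAL;
}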
+88 -39
drivers/base/power/runtime.c
···
 typedef int (*pm_callback_t)(struct device *);

+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+       return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+                                               size_t cb_offset)
+{
+       if (dev->driver && dev->driver->pm)
+               return get_callback_ptr(dev->driver->pm, cb_offset);
+
+       return NULL;
+}
+
 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 {
-       pm_callback_t cb;
        const struct dev_pm_ops *ops;
+       pm_callback_t cb = NULL;

        if (dev->pm_domain)
                ops = &dev->pm_domain->ops;
···
                ops = NULL;

        if (ops)
-               cb = *(pm_callback_t *)((void *)ops + cb_offset);
-       else
-               cb = NULL;
+               cb = get_callback_ptr(ops, cb_offset);

-       if (!cb && dev->driver && dev->driver->pm)
-               cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+       if (!cb)
+               cb = __rpm_get_driver_callback(dev, cb_offset);

        return cb;
 }
···
        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
-       dev->power.needs_force_resume = 0;
+       dev->power.needs_force_resume = false;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
···
                                pm_runtime_put(dev->parent);
                }
        }
+       /*
+        * Clear power.needs_force_resume in case it has been set by
+        * pm_runtime_force_suspend() invoked from a driver remove callback.
+        */
+       dev->power.needs_force_resume = false;
 }

 /**
···
                pm_request_idle(link->supplier);
 }

-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
 {
-       return atomic_read(&dev->power.usage_count) <= 1 &&
-               (atomic_read(&dev->power.child_count) == 0 ||
-               dev->power.ignore_children);
+       /*
+        * Setting power.strict_midlayer means that the middle layer
+        * code does not want its runtime PM callbacks to be invoked via
+        * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+        * return a direct pointer to the driver callback in that case.
+        */
+       if (dev_pm_strict_midlayer_is_set(dev))
+               return __rpm_get_driver_callback(dev, cb_offset);
+
+       return __rpm_get_callback(dev, cb_offset);
 }
+
+#define GET_CALLBACK(dev, callback) \
+       get_callback(dev, offsetof(struct dev_pm_ops, callback))

 /**
  * pm_runtime_force_suspend - Force a device into suspend state if needed.
···
  * sure the device is put into low power state and it should only be used during
  * system-wide PM transitions to sleep states. It assumes that the analogous
  * pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
  */
 int pm_runtime_force_suspend(struct device *dev)
 {
···
        int ret;

        pm_runtime_disable(dev);
-       if (pm_runtime_status_suspended(dev))
+       if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
                return 0;

-       callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+       callback = GET_CALLBACK(dev, runtime_suspend);

        dev_pm_enable_wake_irq_check(dev, true);
        ret = callback ? callback(dev) : 0;
···
        /*
         * If the device can stay in suspend after the system-wide transition
         * to the working state that will follow, drop the children counter of
-        * its parent, but set its status to RPM_SUSPENDED anyway in case this
-        * function will be called again for it in the meantime.
+        * its parent and the usage counters of its suppliers. Otherwise, set
+        * power.needs_force_resume to let pm_runtime_force_resume() know that
+        * the device needs to be taken care of and to prevent this function
+        * from handling the device again in case the device is passed to it
+        * once more subsequently.
         */
-       if (pm_runtime_need_not_resume(dev)) {
+       if (pm_runtime_need_not_resume(dev))
                pm_runtime_set_suspended(dev);
-       } else {
-               __update_runtime_status(dev, RPM_SUSPENDED);
-               dev->power.needs_force_resume = 1;
-       }
+       else
+               dev->power.needs_force_resume = true;

        return 0;
···
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

+#ifdef CONFIG_PM_SLEEP
+
 /**
  * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
  *
- * Typically this function may be invoked from a system resume callback.
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
  */
 int pm_runtime_force_resume(struct device *dev)
 {
        int (*callback)(struct device *);
        int ret = 0;

-       if (!dev->power.needs_force_resume)
+       if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+           pm_runtime_status_suspended(dev)))
                goto out;

-       /*
-        * The value of the parent's children counter is correct already, so
-        * just update the status of the device.
-        */
-       __update_runtime_status(dev, RPM_ACTIVE);
-
-       callback = RPM_GET_CALLBACK(dev, runtime_resume);
+       callback = GET_CALLBACK(dev, runtime_resume);

        dev_pm_disable_wake_irq_check(dev, false);
        ret = callback ? callback(dev) : 0;
···
        }

        pm_runtime_mark_last_busy(dev);
+
 out:
-       dev->power.needs_force_resume = 0;
+       /*
+        * The smart_suspend flag can be cleared here because it is not going
+        * to be necessary until the next system-wide suspend transition that
+        * will update it again.
+        */
+       dev->power.smart_suspend = false;
+       /*
+        * Also clear needs_force_resume to make this function skip devices that
+        * have been seen by it once.
+        */
+       dev->power.needs_force_resume = false;
+
        pm_runtime_enable(dev);
        return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+       return atomic_read(&dev->power.usage_count) <= 1 &&
+               (atomic_read(&dev->power.child_count) == 0 ||
+               dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
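A common driver-side consumer of this pair reuses its runtime PM callbacks for system sleep via the stock DEFINE_RUNTIME_DEV_PM_OPS() helper, which wires pm_runtime_force_suspend() and pm_runtime_force_resume() in as the system sleep callbacks. A minimal sketch (the foo_* callbacks are hypothetical):

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Quiesce the hypothetical device here. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* Re-activate the hypothetical device here. */
        return 0;
}

/*
 * Expands to a dev_pm_ops whose system sleep callbacks are
 * pm_runtime_force_suspend()/pm_runtime_force_resume() and whose runtime
 * PM callbacks are the two functions above.
 */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
                                 foo_runtime_resume, NULL);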
+4
drivers/pci/pci-driver.c
···
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

+       dev_pm_set_strict_midlayer(dev, true);
+
        if (pm && pm->prepare) {
                int error = pm->prepare(dev);
                if (error < 0)
···
                if (pci_dev->current_state < pre_sleep_state)
                        pm_request_resume(dev);
        }
+
+       dev_pm_set_strict_midlayer(dev, false);
 }

 #else /* !CONFIG_PM_SLEEP */
+27
include/linux/device.h
···
 #endif
 }

+/*
+ * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag
+ * @dev: Target device.
+ * @val: New flag value.
+ *
+ * When set, power.strict_midlayer means that the middle layer power management
+ * code (typically, a bus type or a PM domain) does not expect its runtime PM
+ * suspend callback to be invoked at all during system-wide PM transitions and
+ * it does not expect its runtime PM resume callback to be invoked at any point
+ * when runtime PM is disabled for the device during system-wide PM transitions.
+ */
+static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+       dev->power.strict_midlayer = val;
+#endif
+}
+
+static inline bool dev_pm_strict_midlayer_is_set(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+       return dev->power.strict_midlayer;
+#else
+       return false;
+#endif
+}
+
 static inline void device_lock(struct device *dev)
 {
        mutex_lock(&dev->mutex);
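The intended calling convention, which the ACPI and PCI hunks above follow, is for the middle layer to set the flag from its ->prepare() callback and clear it from ->complete(). A sketch for a hypothetical bus type (foo_bus_prepare()/foo_bus_complete() are illustrative names; pm_generic_prepare() and pm_generic_complete() are the stock helpers):

static int foo_bus_prepare(struct device *dev)
{
        /*
         * From here on, the force-suspend/resume wrappers resolve to the
         * driver's runtime PM callbacks rather than this bus type's.
         */
        dev_pm_set_strict_midlayer(dev, true);

        return pm_generic_prepare(dev);
}

static void foo_bus_complete(struct device *dev)
{
        pm_generic_complete(dev);

        dev_pm_set_strict_midlayer(dev, false);
}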
+1
include/linux/pm.h
···
        bool smart_suspend:1;           /* Owned by the PM core */
        bool must_resume:1;             /* Owned by the PM core */
        bool may_skip_resume:1;         /* Set by subsystems */
+       bool strict_midlayer:1;
 #else
        bool should_wakeup:1;
 #endif
+12 -4
include/linux/pm_runtime.h
···
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
-extern bool pm_runtime_need_not_resume(struct device *dev);
 extern int pm_runtime_force_suspend(struct device *dev);
-extern int pm_runtime_force_resume(struct device *dev);

 extern int __pm_runtime_idle(struct device *dev, int rpmflags);
 extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
···
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
-static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
 static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
-static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

 static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
···
 static inline void pm_runtime_release_supplier(struct device_link *link) {}

 #endif /* !CONFIG_PM */

+#ifdef CONFIG_PM_SLEEP
+
+bool pm_runtime_need_not_resume(struct device *dev);
+int pm_runtime_force_resume(struct device *dev);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
+static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; }
+
+#endif /* CONFIG_PM_SLEEP */

 /**
  * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
+1 -3
kernel/kexec_core.c
···
        console_suspend_all();
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
-               goto Resume_console;
+               goto Resume_devices;
        /*
         * dpm_suspend_end() must be called after dpm_suspend_start()
         * to complete the transition, like in the hibernation flows
···
        dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
        dpm_resume_end(PMSG_RESTORE);
-Resume_console:
-       pm_restore_gfp_mask();
        console_resume_all();
        thaw_processes();
 Restore_console:
+6 -1
kernel/power/console.c
···
 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)

 static int orig_fgconsole, orig_kmsg;
+static bool vt_switch_done;

 static DEFINE_MUTEX(vt_switch_mutex);
···
        if (orig_fgconsole < 0)
                return;

+       vt_switch_done = true;
+
        orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
        return;
 }

 void pm_restore_console(void)
 {
-       if (!pm_vt_switch())
+       if (!pm_vt_switch() && !vt_switch_done)
                return;

        if (orig_fgconsole >= 0) {
                vt_move_to_console(orig_fgconsole, 0);
                vt_kmsg_redirect(orig_kmsg);
        }
+
+       vt_switch_done = false;
 }
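The new flag records whether pm_prepare_console() actually performed the VT switch, so pm_restore_console() now undoes it even if the value returned by pm_vt_switch() has changed in between (drivers can register or drop a no-VT-switch preference between the two calls), which previously could leave the console unrestored.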
+9
kernel/power/main.c
···
 #include <linux/acpi.h>
 #include <linux/export.h>
+#include <linux/init.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
 #include <linux/pm-trace.h>
···
 /* If set, devices may be suspended and resumed asynchronously. */
 int pm_async_enabled = 1;
+
+static int __init pm_async_setup(char *str)
+{
+       if (!strcmp(str, "off"))
+               pm_async_enabled = 0;
+       return 1;
+}
+__setup("pm_async=", pm_async_setup);

 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
                              char *buf)