Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'pm-sleep'

Merge updates related to system suspend and hibernation for
6.20-rc1/7.0-rc1:

- Stop flagging the PM runtime workqueue as freezable to avoid system
suspend and resume deadlocks in subsystems that assume asynchronous
runtime PM to work during system-wide PM transitions (Rafael Wysocki)

- Drop redundant NULL pointer checks before acomp_request_free() from
the hibernation code handling image saving (Rafael Wysocki)

- Update wakeup_sources_walk_start() to handle empty lists of wakeup
sources as appropriate (Samuel Wu)

- Make dev_pm_clear_wake_irq() check the power.wakeirq value under
power.lock to avoid race conditions (Gui-Dong Han)

- Avoid bit field races related to power.work_in_progress in the core
device suspend code (Xuewen Yan)

* pm-sleep:
PM: sleep: core: Avoid bit field races related to work_in_progress
PM: sleep: wakeirq: harden dev_pm_clear_wake_irq() against races
PM: wakeup: Handle empty list in wakeup_sources_walk_start()
PM: hibernate: Drop NULL pointer checks before acomp_request_free()
PM: sleep: Do not flag runtime PM workqueue as freezable

+21 -20
+3 -4
Documentation/power/runtime_pm.rst
@@ -712,10 +712,9 @@
 * During system suspend pm_runtime_get_noresume() is called for every device
   right before executing the subsystem-level .prepare() callback for it and
   pm_runtime_barrier() is called for every device right before executing the
-  subsystem-level .suspend() callback for it. In addition to that the PM core
-  calls __pm_runtime_disable() with 'false' as the second argument for every
-  device right before executing the subsystem-level .suspend_late() callback
-  for it.
+  subsystem-level .suspend() callback for it. In addition to that, the PM
+  core disables runtime PM for every device right before executing the
+  subsystem-level .suspend_late() callback for it.

 * During system resume pm_runtime_enable() and pm_runtime_put() are called for
   every device right after executing the subsystem-level .resume_early()
+4 -3
drivers/base/power/main.c
@@ -1647,10 +1647,11 @@
 		goto Complete;

 	/*
-	 * Disable runtime PM for the device without checking if there is a
-	 * pending resume request for it.
+	 * After this point, any runtime PM operations targeting the device
+	 * will fail until the corresponding pm_runtime_enable() call in
+	 * device_resume_early().
 	 */
-	__pm_runtime_disable(dev, false);
+	pm_runtime_disable(dev);

 	if (dev->power.syscore)
 		goto Skip;
+7 -4
drivers/base/power/wakeirq.c
@@ -83,13 +83,16 @@
  */
 void dev_pm_clear_wake_irq(struct device *dev)
 {
-	struct wake_irq *wirq = dev->power.wakeirq;
+	struct wake_irq *wirq;
 	unsigned long flags;

-	if (!wirq)
-		return;
-
 	spin_lock_irqsave(&dev->power.lock, flags);
+	wirq = dev->power.wakeirq;
+	if (!wirq) {
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+		return;
+	}
+
 	device_wakeup_detach_irq(dev);
 	dev->power.wakeirq = NULL;
 	spin_unlock_irqrestore(&dev->power.lock, flags);
+1 -3
drivers/base/power/wakeup.c
@@ -275,9 +275,7 @@
  */
 struct wakeup_source *wakeup_sources_walk_start(void)
 {
-	struct list_head *ws_head = &wakeup_sources;
-
-	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
+	return list_first_or_null_rcu(&wakeup_sources, struct wakeup_source, entry);
 }
 EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
+1 -1
include/linux/pm.h
@@ -681,10 +681,10 @@
 	struct list_head entry;
 	struct completion completion;
 	struct wakeup_source *wakeup;
+	bool work_in_progress;	/* Owned by the PM core */
 	bool wakeup_path:1;
 	bool syscore:1;
 	bool no_pm_callbacks:1;	/* Owned by the PM core */
-	bool work_in_progress:1;	/* Owned by the PM core */
 	bool smart_suspend:1;	/* Owned by the PM core */
 	bool must_resume:1;	/* Owned by the PM core */
 	bool may_skip_resume:1;	/* Set by subsystems */
+1 -1
kernel/power/main.c
@@ -1125,7 +1125,7 @@

 static int __init pm_start_workqueues(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE | WQ_UNBOUND, 0);
+	pm_wq = alloc_workqueue("pm", WQ_UNBOUND, 0);
 	if (!pm_wq)
 		return -ENOMEM;
+4 -4
kernel/power/swap.c
@@ -902,8 +902,8 @@
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		if (data[thr].cr)
-			acomp_request_free(data[thr].cr);
+
+		acomp_request_free(data[thr].cr);

 		if (!IS_ERR_OR_NULL(data[thr].cc))
 			crypto_free_acomp(data[thr].cc);
@@ -1502,8 +1502,8 @@
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		if (data[thr].cr)
-			acomp_request_free(data[thr].cr);
+
+		acomp_request_free(data[thr].cr);

 		if (!IS_ERR_OR_NULL(data[thr].cc))
 			crypto_free_acomp(data[thr].cc);