Merge branches 'pm-core', 'pm-runtime' and 'pm-sleep'

Merge changes related to system sleep and the runtime PM framework for
6.18-rc1:

 - Annotate loops walking device links in the power management core
   code as _srcu and add macros for walking device links to reduce the
   likelihood of coding mistakes related to them (Rafael Wysocki); see
   the usage sketch after this list

 - Document time units for *_time functions in the runtime PM API (Brian
   Norris)

 - Clear power.must_resume in noirq suspend error path to avoid resuming
   a dependent device under a suspended parent or supplier (Rafael
   Wysocki)

 - Fix GFP mask handling during hybrid suspend and make the amdgpu
   driver handle hybrid suspend correctly (Mario Limonciello, Rafael
   Wysocki)

 - Fix GFP mask handling after aborted hibernation in platform mode and
   combine exit paths in power_down() to avoid code duplication (Rafael
   Wysocki)

 - Use vmalloc_array() and vcalloc() in the hibernation core to avoid
   open-coded size computations (Qianfeng Rong)

 - Fix typo in hibernation core code comment (Li Jun)

 - Call pm_wakeup_clear() in the place where the other functions doing
   bookkeeping ahead of suspend_prepare() are called (Samuel Wu)
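
For reference, a minimal sketch of how the new link-walking macros are
used (the function name is made up; the body mirrors the
dpm_wait_for_suppliers() hunk in the diff below, and the loop must run
under the device links SRCU read lock):

	static void example_wait_for_suppliers(struct device *dev, bool async)
	{
		struct device_link *link;
		int idx = device_links_read_lock();

		dev_for_each_link_to_supplier(link, dev)
			if (READ_ONCE(link->status) != DL_STATE_DORMANT)
				dpm_wait(link->supplier, async);

		device_links_read_unlock(idx);
	}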

* pm-core:
  PM: core: Add two macros for walking device links
  PM: core: Annotate loops walking device links as _srcu

* pm-runtime:
  PM: runtime: Documentation: ABI: Document time units for *_time

* pm-sleep:
  PM: hibernate: Combine return paths in power_down()
  PM: hibernate: Restrict GFP mask in power_down()
  PM: hibernate: Fix pm_hibernation_mode_is_suspend() build breakage
  drm/amd: Fix hybrid sleep
  PM: hibernate: Add pm_hibernation_mode_is_suspend()
  PM: hibernate: Fix hybrid-sleep
  PM: sleep: core: Clear power.must_resume in noirq suspend error path
  PM: sleep: Make pm_wakeup_clear() call more clear
  PM: hibernate: Fix typo in memory bitmaps description comment
  PM: hibernate: Use vmalloc_array() and vcalloc() to improve code
Committer: Rafael J. Wysocki
Date:      2025-09-29 12:54:01 +02:00
Commit:    f58f86df6a

11 changed files with 71 additions and 37 deletions

--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -274,15 +274,15 @@ What:		/sys/devices/.../power/runtime_active_time
 Date:		Jul 2010
 Contact:	Arjan van de Ven <arjan@linux.intel.com>
 Description:
-		Reports the total time that the device has been active.
-		Used for runtime PM statistics.
+		Reports the total time that the device has been active, in
+		milliseconds. Used for runtime PM statistics.
 
 What:		/sys/devices/.../power/runtime_suspended_time
 Date:		Jul 2010
 Contact:	Arjan van de Ven <arjan@linux.intel.com>
 Description:
-		Reports total time that the device has been suspended.
-		Used for runtime PM statistics.
+		Reports total time that the device has been suspended, in
+		milliseconds. Used for runtime PM statistics.
 
 What:		/sys/devices/.../power/runtime_usage
 Date:		Apr 2010
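
As a userspace illustration of the now-documented unit (the device path
in this sketch is hypothetical; substitute a real path under
/sys/devices/):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical device path; pick a real device. */
		const char *path =
			"/sys/devices/pci0000:00/0000:00:14.0/power/runtime_active_time";
		unsigned long long active_ms;
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%llu", &active_ms) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/* The ABI update documents the value as milliseconds. */
		printf("active for %llu.%03llu s\n",
		       active_ms / 1000, active_ms % 1000);
		return 0;
	}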

--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
 void fw_devlink_drivers_done(void);
 void fw_devlink_probing_done(void);
 
+#define dev_for_each_link_to_supplier(__link, __dev) \
+	list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+				 device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+	list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+				 device_links_read_lock_held())
+
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);

--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
-#define list_for_each_entry_rcu_locked(pos, head, member) \
-	list_for_each_entry_rcu(pos, head, member, \
-			device_links_read_lock_held())
-
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
 	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
 	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" consumers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
-	if (!dev->power.is_noirq_suspended)
+	if (!dev->power.is_noirq_suspended) {
+		/*
+		 * This means that system suspend has been aborted in the noirq
+		 * phase before invoking the noirq suspend callback for the
+		 * device, so if device_suspend_late() has left it in suspend,
+		 * device_resume_early() should leave it in suspend either in
+		 * case the early resume of it depends on the noirq resume that
+		 * has not run.
+		 */
+		if (dev_pm_skip_suspend(dev))
+			dev->power.must_resume = false;
+
 		goto Out;
+	}
 
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" suppliers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
			continue;

--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-				device_links_read_lock_held())
+	dev_for_each_link_to_supplier(link, dev)
 		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	/* do not resume device if it's normal hibernation */
-	if (!pm_hibernate_is_recovering())
+	if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
 		return 0;
 
 	return amdgpu_device_resume(drm_dev, true);

--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
 }
 #endif /* CONFIG_HIBERNATION */
 
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
 int arch_resume_nosmt(void);
 
 #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV

--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -80,6 +80,17 @@ static const struct platform_hibernation_ops *hibernation_ops;
 
 static atomic_t hibernate_atomic = ATOMIC_INIT(1);
 
+#ifdef CONFIG_SUSPEND
+/**
+ * pm_hibernation_mode_is_suspend - Check if hibernation has been set to suspend
+ */
+bool pm_hibernation_mode_is_suspend(void)
+{
+	return hibernation_mode == HIBERNATION_SUSPEND;
+}
+EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend);
+#endif
+
 bool hibernate_acquire(void)
 {
 	return atomic_add_unless(&hibernate_atomic, -1, 0);
@@ -695,19 +706,13 @@ static void power_down(void)
 #ifdef CONFIG_SUSPEND
 	if (hibernation_mode == HIBERNATION_SUSPEND) {
 		pm_restore_gfp_mask();
 		error = suspend_devices_and_enter(mem_sleep_current);
-		if (error) {
-			hibernation_mode = hibernation_ops ?
-						HIBERNATION_PLATFORM :
-						HIBERNATION_SHUTDOWN;
-		} else {
-			/* Restore swap signature. */
-			error = swsusp_unmark();
-			if (error)
-				pr_err("Swap will be unusable! Try swapon -a.\n");
-
-			return;
-		}
+		if (!error)
+			goto exit;
+
+		hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM :
+						     HIBERNATION_SHUTDOWN;
 	}
 #endif
@@ -718,10 +723,9 @@ static void power_down(void)
 	case HIBERNATION_PLATFORM:
 		error = hibernation_platform_enter();
 		if (error == -EAGAIN || error == -EBUSY) {
-			swsusp_unmark();
 			events_check_enabled = false;
 			pr_info("Wakeup event detected during hibernation, rolling back.\n");
-			return;
+			goto exit;
 		}
 		fallthrough;
 	case HIBERNATION_SHUTDOWN:
@@ -740,6 +744,15 @@ static void power_down(void)
 	pr_crit("Power down manually\n");
 	while (1)
 		cpu_relax();
+
+exit:
+	/* Match the pm_restore_gfp_mask() call in hibernate(). */
+	pm_restrict_gfp_mask();
+
+	/* Restore swap signature. */
+	error = swsusp_unmark();
+	if (error)
+		pr_err("Swap will be unusable! Try swapon -a.\n");
 }
 
 static int load_image_and_restore(void)
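
A simplified sketch (not the actual kernel code) of the GFP-mask
balancing that the new exit label preserves: hibernate() restricts the
mask before calling power_down() and restores it after it returns, so
any power_down() path that has called pm_restore_gfp_mask() must
restrict the mask again before returning:

	/* Simplified control flow, using the names from the diff above. */
	static void power_down_sketch(void)
	{
		pm_restore_gfp_mask();	/* suspend-to-RAM may need to do I/O */

		if (!suspend_devices_and_enter(mem_sleep_current))
			goto exit;

		/* ... otherwise fall back to platform power-off or shutdown ... */
		return;

	exit:
		/*
		 * Re-restrict so the pm_restore_gfp_mask() call in
		 * hibernate() stays balanced when this function returns.
		 */
		pm_restrict_gfp_mask();
	}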

--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,6 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		static_branch_inc(&freezer_active);
 
-	pm_wakeup_clear(0);
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error)

--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -363,7 +363,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
- * one for the leave nodes. The linked leave nodes are used for fast linear
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.

--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -595,6 +595,7 @@ static int enter_state(suspend_state_t state)
 	}
 
 	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
+	pm_wakeup_clear(0);
 	pm_suspend_clear_flags();
 	error = suspend_prepare(state);
 	if (error)

--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -712,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
-	data = vzalloc(array_size(nr_threads, sizeof(*data)));
+	data = vcalloc(nr_threads, sizeof(*data));
 	if (!data) {
 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
 		ret = -ENOMEM;
@@ -1225,14 +1225,14 @@ static int load_compressed_image(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
 
-	page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
+	page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
 	if (!page) {
 		pr_err("Failed to allocate %s page\n", hib_comp_algo);
 		ret = -ENOMEM;
 		goto out_clean;
 	}
 
-	data = vzalloc(array_size(nr_threads, sizeof(*data)));
+	data = vcalloc(nr_threads, sizeof(*data));
 	if (!data) {
 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
 		ret = -ENOMEM;
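
For context, a minimal sketch of the rationale behind this conversion:
vcalloc(n, size) is the zeroing, overflow-checked counterpart of
vzalloc(array_size(n, size)), and vmalloc_array() is the non-zeroing
one, so the open-coded size computation can be dropped (the helper
function below is hypothetical, for illustration only):

	#include <linux/vmalloc.h>

	/* Hypothetical helper: allocate a zeroed array of n elements. */
	static void *alloc_elems(size_t n, size_t elem_size)
	{
		/*
		 * Equivalent to vzalloc(array_size(n, elem_size)); the
		 * overflow check on the multiplication happens inside
		 * vcalloc(), which returns NULL if n * elem_size would
		 * overflow.
		 */
		return vcalloc(n, elem_size);
	}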