	Merge branches 'pm-core', 'pm-runtime' and 'pm-sleep'
Merge changes related to system sleep and the runtime PM framework for 6.18-rc1:

 - Annotate loops walking device links in the power management core code
   as _srcu and add macros for walking device links to reduce the
   likelihood of coding mistakes related to them (Rafael Wysocki)

 - Document time units for *_time functions in the runtime PM API
   (Brian Norris)

 - Clear power.must_resume in the noirq suspend error path to avoid
   resuming a dependent device under a suspended parent or supplier
   (Rafael Wysocki)

 - Fix GFP mask handling during hybrid suspend and make the amdgpu
   driver handle hybrid suspend correctly (Mario Limonciello, Rafael
   Wysocki)

 - Fix GFP mask handling after aborted hibernation in platform mode and
   combine exit paths in power_down() to avoid code duplication (Rafael
   Wysocki)

 - Use vmalloc_array() and vcalloc() in the hibernation core to avoid
   open-coded size computations (Qianfeng Rong)

 - Fix a typo in a hibernation core code comment (Li Jun)

 - Call pm_wakeup_clear() in the same place as the other functions that
   do bookkeeping prior to suspend_prepare() (Samuel Wu)

* pm-core:
  PM: core: Add two macros for walking device links
  PM: core: Annotate loops walking device links as _srcu

* pm-runtime:
  PM: runtime: Documentation: ABI: Document time units for *_time

* pm-sleep:
  PM: hibernate: Combine return paths in power_down()
  PM: hibernate: Restrict GFP mask in power_down()
  PM: hibernate: Fix pm_hibernation_mode_is_suspend() build breakage
  drm/amd: Fix hybrid sleep
  PM: hibernate: Add pm_hibernation_mode_is_suspend()
  PM: hibernate: Fix hybrid-sleep
  PM: sleep: core: Clear power.must_resume in noirq suspend error path
  PM: sleep: Make pm_wakeup_clear() call more clear
  PM: hibernate: Fix typo in memory bitmaps description comment
  PM: hibernate: Use vmalloc_array() and vcalloc() to improve code
This commit is contained in:

commit f58f86df6a

11 changed files with 71 additions and 37 deletions
Documentation/ABI/testing/sysfs-devices-power:

@@ -274,15 +274,15 @@ What:		/sys/devices/.../power/runtime_active_time
 Date:		Jul 2010
 Contact:	Arjan van de Ven <arjan@linux.intel.com>
 Description:
-		Reports the total time that the device has been active.
-		Used for runtime PM statistics.
+		Reports the total time that the device has been active, in
+		milliseconds. Used for runtime PM statistics.
 
 What:		/sys/devices/.../power/runtime_suspended_time
 Date:		Jul 2010
 Contact:	Arjan van de Ven <arjan@linux.intel.com>
 Description:
-		Reports total time that the device has been suspended.
-		Used for runtime PM statistics.
+		Reports total time that the device has been suspended, in
+		milliseconds. Used for runtime PM statistics.
 
 What:		/sys/devices/.../power/runtime_usage
 Date:		Apr 2010

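These attributes now explicitly report milliseconds. A minimal userspace sketch that reads the accumulated active time; the PCI device path is a made-up example, substitute any device that uses runtime PM:

/* Hedged sketch: read runtime_active_time for one device. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical example device path. */
	const char *path =
		"/sys/devices/pci0000:00/0000:00:02.0/power/runtime_active_time";
	unsigned long long active_ms;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &active_ms) == 1)
		printf("device active for %llu ms\n", active_ms);
	fclose(f);
	return 0;
}
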
drivers/base/base.h:

@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
 void fw_devlink_drivers_done(void);
 void fw_devlink_probing_done(void);
 
+#define dev_for_each_link_to_supplier(__link, __dev)	\
+	list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+				 device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev)	\
+	list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+				 device_links_read_lock_held())
+
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
 

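For context, a minimal sketch of the calling pattern these macros assume: the walk must sit inside an SRCU read-side section taken with device_links_read_lock(), whose cookie is later passed to device_links_read_unlock(), exactly as in the power/main.c hunks below. The helper itself is illustrative, not part of the patch:

/* Illustrative only: walk a device's supplier links under the device
 * links SRCU read lock, mirroring the callers updated below.
 */
static void example_log_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dev_info(dev, "supplier: %s\n", dev_name(link->supplier));

	device_links_read_unlock(idx);
}
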
drivers/base/power/main.c:

@@ -40,10 +40,6 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
-#define list_for_each_entry_rcu_locked(pos, head, member) \
-	list_for_each_entry_rcu(pos, head, member, \
-			device_links_read_lock_held())
-
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
 	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
 
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
 	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
 
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" consumers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->consumer, func);
 
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
-	if (!dev->power.is_noirq_suspended)
+	if (!dev->power.is_noirq_suspended) {
+		/*
+		 * This means that system suspend has been aborted in the noirq
+		 * phase before invoking the noirq suspend callback for the
+		 * device, so if device_suspend_late() has left it in suspend,
+		 * device_resume_early() should leave it in suspend too, in
+		 * case the early resume of it depends on the noirq resume that
+		 * has not run.
+		 */
+		if (dev_pm_skip_suspend(dev))
+			dev->power.must_resume = false;
+
 		goto Out;
+	}
 
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" suppliers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->supplier, func);
 
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
 		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 			continue;
 

drivers/base/power/runtime.c:

@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-				device_links_read_lock_held())
+	dev_for_each_link_to_supplier(link, dev)
 		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);

drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c:

@@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	/* do not resume device if it's normal hibernation */
-	if (!pm_hibernate_is_recovering())
+	if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
 		return 0;
 
 	return amdgpu_device_resume(drm_dev, true);

include/linux/suspend.h:

@@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
 }
 #endif /* CONFIG_HIBERNATION */
 
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
 int arch_resume_nosmt(void);
 
 #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV

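Because the header supplies a stub returning false whenever CONFIG_HIBERNATION or CONFIG_SUSPEND is disabled, callers such as the amdgpu hunk above need no #ifdef guards. A hedged sketch of the driver-side pattern, where example_resume() is a hypothetical helper:

/* Illustrative thaw callback: skip the resume work for a plain
 * hibernation image write, but do resume for hybrid sleep, where the
 * system actually suspends after saving the image.
 */
static int example_pmops_thaw(struct device *dev)
{
	if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
		return 0;

	return example_resume(dev);	/* hypothetical driver resume helper */
}
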
kernel/power/hibernate.c:

@@ -80,6 +80,17 @@ static const struct platform_hibernation_ops *hibernation_ops;
 
 static atomic_t hibernate_atomic = ATOMIC_INIT(1);
 
+#ifdef CONFIG_SUSPEND
+/**
+ * pm_hibernation_mode_is_suspend - Check if hibernation has been set to suspend
+ */
+bool pm_hibernation_mode_is_suspend(void)
+{
+	return hibernation_mode == HIBERNATION_SUSPEND;
+}
+EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend);
+#endif
+
 bool hibernate_acquire(void)
 {
 	return atomic_add_unless(&hibernate_atomic, -1, 0);
@@ -695,19 +706,13 @@ static void power_down(void)
 
 #ifdef CONFIG_SUSPEND
 	if (hibernation_mode == HIBERNATION_SUSPEND) {
+		pm_restore_gfp_mask();
 		error = suspend_devices_and_enter(mem_sleep_current);
-		if (error) {
-			hibernation_mode = hibernation_ops ?
-						HIBERNATION_PLATFORM :
-						HIBERNATION_SHUTDOWN;
-		} else {
-			/* Restore swap signature. */
-			error = swsusp_unmark();
-			if (error)
-				pr_err("Swap will be unusable! Try swapon -a.\n");
+		if (!error)
+			goto exit;
 
-			return;
-		}
+		hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM :
+						     HIBERNATION_SHUTDOWN;
 	}
 #endif
 
@@ -718,10 +723,9 @@ static void power_down(void)
 	case HIBERNATION_PLATFORM:
 		error = hibernation_platform_enter();
 		if (error == -EAGAIN || error == -EBUSY) {
-			swsusp_unmark();
 			events_check_enabled = false;
 			pr_info("Wakeup event detected during hibernation, rolling back.\n");
-			return;
+			goto exit;
 		}
 		fallthrough;
 	case HIBERNATION_SHUTDOWN:
@@ -740,6 +744,15 @@ static void power_down(void)
 	pr_crit("Power down manually\n");
 	while (1)
 		cpu_relax();
+
+exit:
+	/* Match the pm_restore_gfp_mask() call in hibernate(). */
+	pm_restrict_gfp_mask();
+
+	/* Restore swap signature. */
+	error = swsusp_unmark();
+	if (error)
+		pr_err("Swap will be unusable! Try swapon -a.\n");
 }
 
 static int load_image_and_restore(void)

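The power_down() hunks above rely on a balancing rule: hibernate() restricts the GFP mask before creating the image and restores it during cleanup, while suspend_devices_and_enter() expects normal allocations to work. A simplified sketch of how that pairing now falls out, using the helpers named in the hunks; this is an orientation aid, not the real function:

/* Simplified sketch: power_down() returns to hibernate() only after
 * re-restricting the GFP mask, so hibernate()'s unconditional
 * pm_restore_gfp_mask() during cleanup stays balanced.
 */
static void power_down_sketch(void)
{
	int error;

	pm_restore_gfp_mask();		/* s2ram path may allocate normally */
	error = suspend_devices_and_enter(mem_sleep_current);
	if (error)
		return;			/* the real code powers down instead */

	/* Resumed from s2ram: match the pm_restore_gfp_mask() in hibernate(). */
	pm_restrict_gfp_mask();

	/* Restore swap signature. */
	if (swsusp_unmark())
		pr_err("Swap will be unusable! Try swapon -a.\n");
}
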
kernel/power/process.c:

@@ -132,7 +132,6 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		static_branch_inc(&freezer_active);
 
-	pm_wakeup_clear(0);
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error)

kernel/power/snapshot.c:

@@ -363,7 +363,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  *
  * One radix tree is represented by one struct mem_zone_bm_rtree. There are
  * two linked lists for the nodes of the tree, one for the inner nodes and
- * one for the leave nodes. The linked leave nodes are used for fast linear
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
  * access of the memory bitmap.
  *
  * The struct rtree_node represents one node of the radix tree.

kernel/power/suspend.c:

@@ -595,6 +595,7 @@ static int enter_state(suspend_state_t state)
 	}
 
 	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
+	pm_wakeup_clear(0);
 	pm_suspend_clear_flags();
 	error = suspend_prepare(state);
 	if (error)

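The net effect of the last two hunks: pm_wakeup_clear() moves out of freeze_processes() and sits with the other bookkeeping done before suspend_prepare(). A condensed view of the resulting ordering, using only names from the hunks; this is not a complete listing of enter_state():

/* Condensed ordering sketch of enter_state() after the change. */
static int enter_state_sketch(suspend_state_t state)
{
	pm_wakeup_clear(0);		/* forget stale wakeup events up front */
	pm_suspend_clear_flags();
	return suspend_prepare(state);	/* freezes tasks, no longer clears wakeups */
}
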
kernel/power/swap.c:

@@ -712,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
-	data = vzalloc(array_size(nr_threads, sizeof(*data)));
+	data = vcalloc(nr_threads, sizeof(*data));
 	if (!data) {
 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
 		ret = -ENOMEM;
@@ -1225,14 +1225,14 @@ static int load_compressed_image(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
 
-	page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
+	page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
 	if (!page) {
 		pr_err("Failed to allocate %s page\n", hib_comp_algo);
 		ret = -ENOMEM;
 		goto out_clean;
 	}
 
-	data = vzalloc(array_size(nr_threads, sizeof(*data)));
+	data = vcalloc(nr_threads, sizeof(*data));
 	if (!data) {
 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
 		ret = -ENOMEM;

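These conversions are behavior-preserving: vmalloc_array(n, size) replaces vmalloc(array_size(n, size)) and vcalloc(n, size) replaces vzalloc(array_size(n, size)). In both cases the helper performs the overflow-checked multiplication itself, and vcalloc additionally zeroes the buffer just as vzalloc did. A small sketch of the resulting pattern; the struct is illustrative only:

#include <linux/vmalloc.h>

struct worker { unsigned long state; };

/* Illustrative allocation matching the converted call sites above. */
static struct worker *alloc_workers(unsigned int nr_threads)
{
	/* Overflow-checked and zero-initialized, like vzalloc(array_size(...)). */
	return vcalloc(nr_threads, sizeof(struct worker));
}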