clk: Add generic sync_state callback for disabling unused clocks

There are unused clocks that need to remain untouched by
clk_disable_unused(), and most likely could be disabled later, on
sync_state. So provide a generic sync_state callback for the clock
providers that register such clocks. Then, use the same mechanism as
clk_disable_unused() from that generic callback, but pass the device to
make sure only the clocks belonging to the current clock provider get
disabled, if unused. Also, during the default clk_disable_unused(), if
the driver that registered the clock has the generic
clk_sync_state_disable_unused() callback set for sync_state, skip
disabling its clocks.

Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
Reviewed-by: Bjorn Andersson <andersson@kernel.org>
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Bjorn Andersson <andersson@kernel.org>
Link: https://lore.kernel.org/r/20221227204528.1899863-1-abel.vesa@linaro.org
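As a usage sketch (not part of this patch): a clock provider opts in simply by pointing its driver's sync_state hook at the new generic callback. The foo-cc driver below, its compatible string and its probe body are hypothetical; only clk_sync_state_disable_unused() added here and the standard driver-core sync_state member are real.

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_cc_probe(struct platform_device *pdev)
{
	/* Register this provider's clocks here (e.g. devm_clk_hw_register()). */
	return 0;
}

static const struct of_device_id foo_cc_of_match[] = {
	{ .compatible = "vendor,foo-cc" },	/* illustrative */
	{ }
};
MODULE_DEVICE_TABLE(of, foo_cc_of_match);

static struct platform_driver foo_cc_driver = {
	.probe = foo_cc_probe,
	.driver = {
		.name = "foo-cc",
		.of_match_table = foo_cc_of_match,
		/*
		 * With this set, clk_disable_unused() leaves this provider's
		 * clocks alone at late_initcall; they are disabled instead
		 * once all consumers of the device have probed (sync_state).
		 */
		.sync_state = clk_sync_state_disable_unused,
	},
};
module_platform_driver(foo_cc_driver);
MODULE_LICENSE("GPL");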
parent ce273e690d
commit 26b36df751

2 changed files with 47 additions and 11 deletions
drivers/clk/clk.c

@@ -1302,14 +1302,26 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
-static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+static void clk_unprepare_unused_subtree(struct clk_core *core,
+						struct device *dev)
 {
+	bool from_sync_state = !!dev;
 	struct clk_core *child;
 
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
+		clk_unprepare_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be unprepared on sync_state,
+	 * so leave as is for now
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev))
+		return;
 
 	if (core->prepare_count)
 		return;
@@ -1332,15 +1344,27 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 	clk_pm_runtime_put(core);
 }
 
-static void __init clk_disable_unused_subtree(struct clk_core *core)
+static void clk_disable_unused_subtree(struct clk_core *core,
+					struct device *dev)
 {
+	bool from_sync_state = !!dev;
 	struct clk_core *child;
 	unsigned long flags;
 
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
+		clk_disable_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be disabled on sync_state,
+	 * so leave as is for now
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev))
+		return;
 
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
@@ -1378,7 +1402,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 		clk_core_disable_unprepare(core->parent);
 }
 
-static bool clk_ignore_unused __initdata;
+static bool clk_ignore_unused;
 static int __init clk_ignore_unused_setup(char *__unused)
 {
 	clk_ignore_unused = true;
@@ -1386,35 +1410,46 @@ static int __init clk_ignore_unused_setup(char *__unused)
 }
 __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
-static int __init clk_disable_unused(void)
+static void __clk_disable_unused(struct device *dev)
 {
 	struct clk_core *core;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
+		return;
 	}
 
 	clk_prepare_lock();
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	clk_prepare_unlock();
+}
+
+static int __init clk_disable_unused(void)
+{
+	__clk_disable_unused(NULL);
 
 	return 0;
 }
 late_initcall_sync(clk_disable_unused);
 
+void clk_sync_state_disable_unused(struct device *dev)
+{
+	__clk_disable_unused(dev);
+}
+EXPORT_SYMBOL_GPL(clk_sync_state_disable_unused);
+
 static int clk_core_determine_round_nolock(struct clk_core *core,
 					   struct clk_rate_request *req)
 {
include/linux/clk-provider.h

@@ -720,6 +720,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
+void clk_sync_state_disable_unused(struct device *dev);
 /**
  * clk_register_divider - register a divider clock with the clock framework
  * @dev: device registering this clock
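Because the callback is exported, a provider that needs additional teardown on sync_state can also chain to it from its own callback rather than using it directly; the dev_has_sync_state() check above keeps its clocks untouched at boot either way. A minimal sketch, where foo_cc_sync_state() and the vote-dropping helper are hypothetical and only clk_sync_state_disable_unused() comes from this patch:

#include <linux/clk-provider.h>
#include <linux/device.h>

static void foo_cc_drop_boot_votes(struct device *dev)
{
	/* Hypothetical: release whatever state the bootloader left behind. */
}

static void foo_cc_sync_state(struct device *dev)
{
	/* Provider-specific cleanup first... */
	foo_cc_drop_boot_votes(dev);

	/* ...then let the clk core disable this provider's unused clocks. */
	clk_sync_state_disable_unused(dev);
}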