mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 08:38:45 +02:00 
			
		
		
		
	 fa688207c9
			
		
	
	
		fa688207c9
		
	
	
	
	
		
			
			As in commit f21afc25f9 ("smp.h: Use local_irq_{save,restore}() in
!SMP version of on_each_cpu()"), we don't want to enable irqs if they
are not already enabled.  There are currently no known problematical
callers of these functions, but since it is a known failure pattern, we
preemptively fix them.
Since they are not trivial functions, make them non-inline by moving
them to up.c.  This also makes it so we don't have to fix #include
dependencies for preempt_{disable,enable}.
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
	
			
		
			
				
	
	
		
			60 lines
		
	
	
	
		
			1.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			60 lines
		
	
	
	
		
			1.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| /*
 | |
|  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 | |
|  */
 | |
| 
 | |
| #include <linux/interrupt.h>
 | |
| #include <linux/kernel.h>
 | |
| #include <linux/export.h>
 | |
| #include <linux/smp.h>
 | |
| 
 | |
/*
 * UP stub for smp_call_function_single(): there is only CPU 0, so just run
 * func(info) locally with interrupts disabled, as the SMP version would on
 * the target CPU.  @wait is meaningless here (the call is always synchronous)
 * and is accepted only for interface parity with kernel/smp.c.
 *
 * Returns 0; warns if asked to target a CPU other than 0.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	/*
	 * Use local_irq_save/restore rather than disable/enable: we must not
	 * turn interrupts back on if the caller already had them off (same
	 * reasoning as on_each_cpu_mask()/on_each_cpu_cond() below and
	 * commit f21afc25f9 for the !SMP on_each_cpu()).
	 */
	local_irq_save(flags);
	(func)(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
 | |
| 
 | |
| /*
 | |
|  * Note we still need to test the mask even for UP
 | |
|  * because we actually can get an empty mask from
 | |
|  * code that on SMP might call us without the local
 | |
|  * CPU in the mask.
 | |
|  */
 | |
| void on_each_cpu_mask(const struct cpumask *mask,
 | |
| 		      smp_call_func_t func, void *info, bool wait)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	if (cpumask_test_cpu(0, mask)) {
 | |
| 		local_irq_save(flags);
 | |
| 		func(info);
 | |
| 		local_irq_restore(flags);
 | |
| 	}
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu_mask);
 | |
| 
 | |
| /*
 | |
|  * Preemption is disabled here to make sure the cond_func is called under the
 | |
|  * same condtions in UP and SMP.
 | |
|  */
 | |
| void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 | |
| 		      smp_call_func_t func, void *info, bool wait,
 | |
| 		      gfp_t gfp_flags)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	preempt_disable();
 | |
| 	if (cond_func(0, info)) {
 | |
| 		local_irq_save(flags);
 | |
| 		func(info);
 | |
| 		local_irq_restore(flags);
 | |
| 	}
 | |
| 	preempt_enable();
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu_cond);
 |