	smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
As in commit f21afc25f9 ("smp.h: Use local_irq_{save,restore}() in
!SMP version of on_each_cpu()"), we don't want to enable irqs if they
are not already enabled.  There are currently no known problematic
callers of these functions, but since this is a known failure pattern,
we preemptively fix them.

Since they are not trivial functions, make them non-inline by moving
them to up.c.  This also means we don't have to fix #include
dependencies for preempt_{disable,enable}.
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
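
[Editor's note] To make the failure pattern concrete, here is a hypothetical
!SMP caller (names invented for illustration; not part of this patch). It runs
with interrupts disabled and expects them to stay disabled across the
cross-call:

#include <linux/bug.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

/* Hypothetical per-cpu callback, for illustration only. */
static void flush_local_state(void *info)
{
	/* ... per-cpu work ... */
}

static void example_caller(const struct cpumask *mask)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * The old !SMP macro expanded to local_irq_disable() ...
	 * local_irq_enable(), so interrupts were unconditionally
	 * re-enabled here even though this caller still expects them
	 * off.  The new out-of-line helpers use local_irq_save()/
	 * local_irq_restore() and preserve the caller's irq state.
	 */
	on_each_cpu_mask(mask, flush_local_state, NULL, true);
	local_irq_restore(flags);
}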
			
			
commit fa688207c9
parent c14c338cb0

2 changed files with 55 additions and 46 deletions
diff --git a/include/linux/smp.h b/include/linux/smp.h
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -29,6 +29,22 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+		void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -100,22 +116,6 @@ static inline void call_function_init(void) { }
  */
 int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
-
 /*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
@@ -151,36 +151,6 @@ static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
 	return 0;
 }
 
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-	do {						\
-		if (cpumask_test_cpu(0, (mask))) {	\
-			local_irq_disable();		\
-			(func)(info);			\
-			local_irq_enable();		\
-		}					\
-	} while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-	do {							\
-		void *__info = (info);				\
-		preempt_disable();				\
-		if ((cond_func)(0, __info)) {			\
-			local_irq_disable();			\
-			(func)(__info);				\
-			local_irq_enable();			\
-		}						\
-		preempt_enable();				\
-	} while (0)
-
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
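
[Editor's note] For context, a hypothetical on_each_cpu_cond() call site,
sketched against the declarations moved above (the per-cpu counter and
function names are invented for illustration):

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, pending_work);

/* cond_func: selects the CPUs that actually have work queued. */
static bool cpu_has_pending(int cpu, void *info)
{
	return per_cpu(pending_work, cpu) != 0;
}

/* func: runs on each selected CPU, with interrupts disabled. */
static void drain_pending(void *info)
{
	this_cpu_write(pending_work, 0);
}

static void drain_all_pending(void)
{
	/*
	 * gfp_flags is used by the SMP implementation to allocate the
	 * cpumask of selected CPUs; the UP version accepts but ignores it.
	 */
	on_each_cpu_cond(cpu_has_pending, drain_pending, NULL, true,
			 GFP_KERNEL);
}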
							
								
								
									
diff --git a/kernel/up.c b/kernel/up.c
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+		      smp_call_func_t func, void *info, bool wait)
+{
+	unsigned long flags;
+
+	if (cpumask_test_cpu(0, mask)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under
+ * the same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		      smp_call_func_t func, void *info, bool wait,
+		      gfp_t gfp_flags)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	if (cond_func(0, info)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
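
[Editor's note] A minimal self-check sketch of the invariant the new UP
implementations establish (hypothetical test code, not part of the patch):
the caller's interrupt state is identical before and after the call, whereas
the removed macros forced interrupts on.

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

static void nop_func(void *info)
{
}

static void check_irq_state_preserved(void)
{
	local_irq_disable();
	on_each_cpu_mask(cpu_online_mask, nop_func, NULL, true);
	WARN_ON(!irqs_disabled());	/* the old macro would trip this */
	local_irq_enable();
}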