mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 00:28:52 +02:00 
			
		
		
		
	 caa759323c
			
		
	
	
		caa759323c
		
	
	
	
	
		
			
			The return value is fixed. Remove it and amend the callers. [ tglx: Fixup arm/bL_switcher and powerpc/rtas ] Signed-off-by: Nadav Amit <namit@vmware.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Richard Henderson <rth@twiddle.net> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Matt Turner <mattst88@gmail.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Link: https://lkml.kernel.org/r/20190613064813.8102-2-namit@vmware.com
		
			
				
	
	
		
			110 lines
		
	
	
	
		
			2.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			110 lines
		
	
	
	
		
			2.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| // SPDX-License-Identifier: GPL-2.0-only
 | |
| /*
 | |
|  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 | |
|  */
 | |
| 
 | |
| #include <linux/interrupt.h>
 | |
| #include <linux/kernel.h>
 | |
| #include <linux/export.h>
 | |
| #include <linux/smp.h>
 | |
| #include <linux/hypervisor.h>
 | |
| 
 | |
/*
 * UP variant of smp_call_function_single(): there is only CPU 0, so the
 * function is simply invoked locally with interrupts disabled (mirroring
 * the IPI-handler context the SMP version would provide).  @wait is
 * meaningless on UP and ignored.  Always returns 0.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long irqflags;

	/* Any CPU other than 0 cannot exist on UP. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
 | |
| 
 | |
| int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	local_irq_save(flags);
 | |
| 	csd->func(csd->info);
 | |
| 	local_irq_restore(flags);
 | |
| 	return 0;
 | |
| }
 | |
| EXPORT_SYMBOL(smp_call_function_single_async);
 | |
| 
 | |
| void on_each_cpu(smp_call_func_t func, void *info, int wait)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	local_irq_save(flags);
 | |
| 	func(info);
 | |
| 	local_irq_restore(flags);
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu);
 | |
| 
 | |
| /*
 | |
|  * Note we still need to test the mask even for UP
 | |
|  * because we actually can get an empty mask from
 | |
|  * code that on SMP might call us without the local
 | |
|  * CPU in the mask.
 | |
|  */
 | |
| void on_each_cpu_mask(const struct cpumask *mask,
 | |
| 		      smp_call_func_t func, void *info, bool wait)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	if (cpumask_test_cpu(0, mask)) {
 | |
| 		local_irq_save(flags);
 | |
| 		func(info);
 | |
| 		local_irq_restore(flags);
 | |
| 	}
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu_mask);
 | |
| 
 | |
| /*
 | |
|  * Preemption is disabled here to make sure the cond_func is called under the
 | |
|  * same condtions in UP and SMP.
 | |
|  */
 | |
| void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 | |
| 			   smp_call_func_t func, void *info, bool wait,
 | |
| 			   gfp_t gfp_flags, const struct cpumask *mask)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	preempt_disable();
 | |
| 	if (cond_func(0, info)) {
 | |
| 		local_irq_save(flags);
 | |
| 		func(info);
 | |
| 		local_irq_restore(flags);
 | |
| 	}
 | |
| 	preempt_enable();
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu_cond_mask);
 | |
| 
 | |
| void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 | |
| 		      smp_call_func_t func, void *info, bool wait,
 | |
| 		      gfp_t gfp_flags)
 | |
| {
 | |
| 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
 | |
| }
 | |
| EXPORT_SYMBOL(on_each_cpu_cond);
 | |
| 
 | |
| int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
 | |
| {
 | |
| 	int ret;
 | |
| 
 | |
| 	if (cpu != 0)
 | |
| 		return -ENXIO;
 | |
| 
 | |
| 	if (phys)
 | |
| 		hypervisor_pin_vcpu(0);
 | |
| 	ret = func(par);
 | |
| 	if (phys)
 | |
| 		hypervisor_pin_vcpu(-1);
 | |
| 
 | |
| 	return ret;
 | |
| }
 | |
| EXPORT_SYMBOL_GPL(smp_call_on_cpu);
 |