	smp: Remove smp_call_function() and on_each_cpu() return values
The return value of these functions is fixed: they can only ever return zero. Remove it and amend the callers.

[ tglx: Fixup arm/bL_switcher and powerpc/rtas ]

Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20190613064813.8102-2-namit@vmware.com
parent a22793c79d
commit caa759323c

11 changed files with 27 additions and 53 deletions
arch/alpha/kernel/smp.c

@@ -614,8 +614,7 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (on_each_cpu(ipi_imb, NULL, 1))
-		printk(KERN_CRIT "smp_imb: timed out\n");
+	on_each_cpu(ipi_imb, NULL, 1);
 }
 EXPORT_SYMBOL(smp_imb);
 
@@ -630,9 +629,7 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors.  */
-	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
-		printk(KERN_CRIT "flush_tlb_all: timed out\n");
-	}
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -667,9 +664,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 
-	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
-		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
-	}
+	smp_call_function(ipi_flush_tlb_mm, mm, 1);
 
 	preempt_enable();
 }
@@ -720,9 +715,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	data.mm = mm;
 	data.addr = addr;
 
-	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
-		printk(KERN_CRIT "flush_tlb_page: timed out\n");
-	}
+	smp_call_function(ipi_flush_tlb_page, &data, 1);
 
 	preempt_enable();
 }
@@ -772,9 +765,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		}
 	}
 
-	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
-		printk(KERN_CRIT "flush_icache_page: timed out\n");
-	}
+	smp_call_function(ipi_flush_icache_page, mm, 1);
 
 	preempt_enable();
 }
arch/alpha/oprofile/common.c

@@ -65,7 +65,7 @@ op_axp_setup(void)
 	model->reg_setup(&reg, ctr, &sys);
 
 	/* Configure the registers on all cpus.  */
-	(void)smp_call_function(model->cpu_setup, &reg, 1);
+	smp_call_function(model->cpu_setup, &reg, 1);
 	model->cpu_setup(&reg);
 	return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
+	smp_call_function(op_axp_cpu_start, NULL, 1);
 	op_axp_cpu_start(NULL);
 	return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
+	smp_call_function(op_axp_cpu_stop, NULL, 1);
 	op_axp_cpu_stop(NULL);
 }
 
arch/arm/common/bL_switcher.c

@@ -542,16 +542,14 @@ static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 
 int bL_switcher_trace_trigger(void)
 {
-	int ret;
-
 	preempt_disable();
 
 	bL_switcher_trace_trigger_cpu(NULL);
-	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+	smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
 
 	preempt_enable();
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
 
arch/ia64/kernel/perfmon.c

@@ -6390,11 +6390,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
-	if (ret) {
-		DPRINT(("on_each_cpu() failed: %d\n", ret));
-		goto cleanup_reserve;
-	}
+	on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 
 	/* officially change to the alternate interrupt handler */
 	pfm_alt_intr_handler = hdl;
@@ -6421,7 +6417,6 @@ int
 pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 {
 	int i;
-	int ret;
 
 	if (hdl == NULL) return -EINVAL;
 
@@ -6435,10 +6430,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
-	if (ret) {
-		DPRINT(("on_each_cpu() failed: %d\n", ret));
-	}
+	on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 
 	for_each_online_cpu(i) {
 		pfm_unreserve_session(NULL, 1, i);
arch/ia64/kernel/uncached.c

@@ -124,8 +124,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
-		if (status || atomic_read(&uc_pool->status))
+		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
+		if (atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
 		goto failed;
@@ -146,8 +146,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
-	if (status || atomic_read(&uc_pool->status))
+	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
+	if (atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
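The surviving atomic_read() checks above illustrate the error-reporting pattern this commit leaves in place: with no return value from smp_call_function() or on_each_cpu(), per-CPU failures must travel through shared state that the callback itself updates. Below is a minimal sketch of that pattern, not taken from the kernel; it assumes a kernel-module context, and every demo_* name is hypothetical.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/smp.h>

static atomic_t demo_nr_failures;	/* shared error count, visible to all CPUs */

/* Hypothetical per-CPU operation; a real caller would do actual setup here. */
static int demo_configure_this_cpu(void *info)
{
	return info ? 0 : -EINVAL;
}

/* Per-CPU callback: runs on each CPU with interrupts disabled. */
static void demo_ipi_setup(void *info)
{
	if (demo_configure_this_cpu(info))
		atomic_inc(&demo_nr_failures);
}

static int demo_setup_all_cpus(void *info)
{
	atomic_set(&demo_nr_failures, 0);

	/* After this commit there is no return value to check here. */
	on_each_cpu(demo_ipi_setup, info, 1);

	/* Per-CPU failures are reported through the shared atomic instead. */
	return atomic_read(&demo_nr_failures) ? -EIO : 0;
}

The powerpc/rtas hunk below relies on the same idea: errors land in data.error, which the per-CPU function sets itself, so the dropped on_each_cpu() check loses nothing.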
arch/powerpc/kernel/rtas.c

@@ -994,8 +994,7 @@ int rtas_ibm_suspend_me(u64 handle)
 	/* Call function on all CPUs.  One of us will make the
 	 * rtas call
 	 */
-	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
-		atomic_set(&data.error, -EINVAL);
+	on_each_cpu(rtas_percpu_suspend_me, &data, 0);
 
 	wait_for_completion(&done);
 
arch/x86/lib/cache-smp.c

@@ -15,6 +15,7 @@ EXPORT_SYMBOL(wbinvd_on_cpu);
 
 int wbinvd_on_all_cpus(void)
 {
-	return on_each_cpu(__wbinvd, NULL, 1);
+	on_each_cpu(__wbinvd, NULL, 1);
+	return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
drivers/char/agp/generic.c

@@ -1311,8 +1311,7 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
-		panic(PFX "timed out waiting for the other CPUs!\n");
+	on_each_cpu(ipi_handler, NULL, 1);
 }
 EXPORT_SYMBOL(global_cache_flush);
 
include/linux/smp.h

@@ -35,7 +35,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 /*
  * Call a function on all processors
  */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
+void on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
  * Call a function on processors specified by mask, which might include
@@ -101,7 +101,7 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait);
 
@@ -144,9 +144,8 @@ static inline void smp_send_stop(void) { }
  *	These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()			0
-static inline int up_smp_call_function(smp_call_func_t func, void *info)
+static inline void up_smp_call_function(smp_call_func_t func, void *info)
 {
-	return 0;
 }
 #define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
kernel/smp.c

@@ -487,13 +487,11 @@ EXPORT_SYMBOL(smp_call_function_many);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait)
+void smp_call_function(smp_call_func_t func, void *info, int wait)
 {
 	preempt_disable();
 	smp_call_function_many(cpu_online_mask, func, info, wait);
 	preempt_enable();
-
-	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
 
@@ -594,18 +592,16 @@ void __init smp_init(void)
  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
  * of local_irq_disable/enable().
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
+void on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
 	unsigned long flags;
-	int ret = 0;
 
 	preempt_disable();
-	ret = smp_call_function(func, info, wait);
+	smp_call_function(func, info, wait);
 	local_irq_save(flags);
 	func(info);
 	local_irq_restore(flags);
 	preempt_enable();
-	return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
 
kernel/up.c

@@ -35,14 +35,13 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 }
 EXPORT_SYMBOL(smp_call_function_single_async);
 
-int on_each_cpu(smp_call_func_t func, void *info, int wait)
+void on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	func(info);
 	local_irq_restore(flags);
-	return 0;
 }
 EXPORT_SYMBOL(on_each_cpu);
 
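On uniprocessor builds the stub above keeps the calling convention identical: the callback runs once, locally, with interrupts disabled, and there is still nothing meaningful to return. A short sketch of the observable difference between the two configurations; the demo_* names are hypothetical.

#include <linux/atomic.h>
#include <linux/smp.h>

static void demo_count_cpu(void *info)
{
	atomic_inc((atomic_t *)info);	/* one increment per CPU that runs the callback */
}

static void demo_probe(void)
{
	atomic_t n = ATOMIC_INIT(0);

	on_each_cpu(demo_count_cpu, &n, 1);	/* wait=1: all callbacks done on return */

	/* UP kernel: n == 1 (local call only); SMP: n == num_online_cpus(). */
}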