Mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 08:38:45 +02:00
	exit: Rename complete_and_exit to kthread_complete_and_exit
Update complete_and_exit to call kthread_exit instead of do_exit. Change the name to reflect this change in functionality. All of the users of complete_and_exit are causing the current kthread to exit, so this change makes it clear what is happening.

Move the implementation of kthread_complete_and_exit from kernel/exit.c to kernel/kthread.c. As this function is kthread specific, it makes most sense to live with the kthread functions.

There are no functional changes.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
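For illustration, every converted caller follows roughly the shape sketched below. This is a minimal sketch, not code from this commit: the thread function, flag, and completion names (example_thread_fn, example_should_exit, example_exit_done) are hypothetical.

/*
 * Hedged sketch of a kthread that exits via the renamed helper.
 * example_should_exit and example_exit_done are hypothetical
 * driver-private state, not part of this commit.
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(example_exit_done);
static atomic_t example_should_exit = ATOMIC_INIT(0);

static int example_thread_fn(void *data)
{
	do {
		/* the driver's periodic work would go here */
		msleep(100);
	} while (!atomic_read(&example_should_exit));

	/*
	 * Complete the teardown notification and terminate the kthread
	 * in one step; no module code runs in this thread afterwards.
	 */
	kthread_complete_and_exit(&example_exit_done, 0);
}

The rsi, rtsx, usbatm, and jffs2 hunks below all have this shape: the exiting thread owns both the final complete() and the exit, and the teardown path only waits on the completion.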
parent ca3574bd65
commit cead185526

15 changed files with 43 additions and 31 deletions
@@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common)
 		rsi_coex_sched_tx_pkts(coex_cb);
 	} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);
 
-	complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
+	kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
 }
 
 int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)

@@ -260,7 +260,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common)
 		if (common->init_done)
 			rsi_core_qos_processor(common);
 	} while (atomic_read(&common->tx_thread.thread_done) == 0);
-	complete_and_exit(&common->tx_thread.completion, 0);
+	kthread_complete_and_exit(&common->tx_thread.completion, 0);
 }
 
 #ifdef CONFIG_RSI_COEX

@@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common)
 
 	rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
 	atomic_inc(&sdev->rx_thread.thread_done);
-	complete_and_exit(&sdev->rx_thread.completion, 0);
+	kthread_complete_and_exit(&sdev->rx_thread.completion, 0);
 }
 
 /**

@@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common)
 out:
 	rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
 	skb_queue_purge(&dev->rx_q);
-	complete_and_exit(&dev->rx_thread.completion, 0);
+	kthread_complete_and_exit(&dev->rx_thread.completion, 0);
 }
 

@@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused)
 			 * No dock to manage
 			 */
 		case PNP_FUNCTION_NOT_SUPPORTED:
-			complete_and_exit(&unload_sem, 0);
+			kthread_complete_and_exit(&unload_sem, 0);
 		case PNP_SYSTEM_NOT_DOCKED:
 			d = 0;
 			break;
@@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused)
 		default:
 			pnpbios_print_status("pnp_dock_thread", status);
 			printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
-			complete_and_exit(&unload_sem, 0);
+			kthread_complete_and_exit(&unload_sem, 0);
 		}
 		if (d != docked) {
 			if (pnp_dock_event(d, &now) == 0) {
@@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused)
 			}
 		}
 	}
-	complete_and_exit(&unload_sem, 0);
+	kthread_complete_and_exit(&unload_sem, 0);
 }
 
 static int pnpbios_get_resources(struct pnp_dev *dev)

@@ -450,13 +450,13 @@ static int rtsx_control_thread(void *__dev)
 	 * after the down() -- that's necessary for the thread-shutdown
 	 * case.
 	 *
-	 * complete_and_exit() goes even further than this -- it is safe in
-	 * the case that the thread of the caller is going away (not just
-	 * the structure) -- this is necessary for the module-remove case.
-	 * This is important in preemption kernels, which transfer the flow
-	 * of execution immediately upon a complete().
+	 * kthread_complete_and_exit() goes even further than this --
+	 * it is safe in the case that the thread of the caller is going away
+	 * (not just the structure) -- this is necessary for the module-remove
+	 * case.  This is important in preemption kernels, which transfer the
+	 * flow of execution immediately upon a complete().
 	 */
-	complete_and_exit(&dev->control_exit, 0);
+	kthread_complete_and_exit(&dev->control_exit, 0);
 }
 
 static int rtsx_polling_thread(void *__dev)
@@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev)
 		mutex_unlock(&dev->dev_mutex);
 	}
 
-	complete_and_exit(&dev->polling_exit, 0);
+	kthread_complete_and_exit(&dev->polling_exit, 0);
 }
 
 /*
@@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev)
 		/* Should we unbind if no devices were detected? */
 	}
 
-	complete_and_exit(&dev->scanning_done, 0);
+	kthread_complete_and_exit(&dev->scanning_done, 0);
 }
 
 static void rtsx_init_options(struct rtsx_chip *chip)

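The comment rewrapped in the hunk above carries the reasoning behind the rename: on a preemptible kernel the waiter may run the instant complete() is called and go on to unload the module, so the completion and the exit must happen in a single call that lives in core kernel code rather than in the module being removed. A minimal sketch of the unload side, with hypothetical names (example_stop_request, example_control_exit) rather than this driver's actual fields, assuming the paired kthread finishes with kthread_complete_and_exit(&example_control_exit, 0):

/*
 * Hedged sketch of the module-remove side; the state below is
 * hypothetical and not taken from any file in this commit.
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/module.h>

static DECLARE_COMPLETION(example_control_exit);
static atomic_t example_stop_request = ATOMIC_INIT(0);

static void __exit example_driver_exit(void)
{
	/* Ask the control thread to stop... */
	atomic_set(&example_stop_request, 1);

	/*
	 * ...and wait for it to signal the completion.  Because
	 * kthread_complete_and_exit() is built into the core kernel,
	 * the thread runs no further module code after complete(),
	 * so it is safe to return from here and let the module be freed.
	 */
	wait_for_completion(&example_control_exit);
}
module_exit(example_driver_exit);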
@@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg)
 	instance->thread = NULL;
 	mutex_unlock(&instance->serialize);
 
-	complete_and_exit(&instance->thread_exited, ret);
+	kthread_complete_and_exit(&instance->thread_exited, ret);
 }
 
 static int usbatm_heavy_init(struct usbatm_data *instance)

@@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_)
 	up_write(&common->filesem);
 
 	/* Let fsg_unbind() know the thread has exited */
-	complete_and_exit(&common->thread_notifier, 0);
+	kthread_complete_and_exit(&common->thread_notifier, 0);
 }
 
 

@@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c)
 	spin_lock(&c->erase_completion_lock);
 	c->gc_task = NULL;
 	spin_unlock(&c->erase_completion_lock);
-	complete_and_exit(&c->gc_thread_exit, 0);
+	kthread_complete_and_exit(&c->gc_thread_exit, 0);
 }

@@ -187,7 +187,6 @@ static inline void might_fault(void) { }
 #endif
 
 void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
 
 extern int num_to_str(char *buf, int size,
 		      unsigned long long num, unsigned int width);

@@ -71,6 +71,7 @@ int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
 void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;

@@ -891,15 +891,6 @@ void __noreturn make_task_dead(int signr)
 	do_exit(signr);
 }
 
-void complete_and_exit(struct completion *comp, long code)
-{
-	if (comp)
-		complete(comp);
-
-	do_exit(code);
-}
-EXPORT_SYMBOL(complete_and_exit);
-
 SYSCALL_DEFINE1(exit, int, error_code)
 {
 	do_exit((error_code&0xff)<<8);

@@ -283,6 +283,27 @@ void __noreturn kthread_exit(long result)
 	do_exit(result);
 }
 
+/**
+ * kthread_complete_and_exit - Exit the current kthread.
+ * @comp: Completion to complete
+ * @code: The integer value to return to kthread_stop().
+ *
+ * If present, complete @comp and return @code to kthread_stop().
+ *
+ * A kernel thread whose module may be removed after the completion of
+ * @comp can use this function to exit safely.
+ *
+ * Does not return.
+ */
+void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
+{
+	if (comp)
+		complete(comp);
+
+	kthread_exit(code);
+}
+EXPORT_SYMBOL(kthread_complete_and_exit);
+
 static int kthread(void *_create)
 {
 	static const struct sched_param param = { .sched_priority = 0 };

@@ -17,7 +17,7 @@
 void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
 {
 	try_catch->try_result = -EFAULT;
-	complete_and_exit(try_catch->try_completion, -EFAULT);
+	kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
 }
 EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
 
@@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data)
 
 	try_catch->try(try_catch->context);
 
-	complete_and_exit(try_catch->try_completion, 0);
+	kthread_complete_and_exit(try_catch->try_completion, 0);
 }
 
 static unsigned long kunit_test_timeout(void)

@@ -171,7 +171,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 		"kthread_exit",
 		"make_task_dead",
 		"__module_put_and_kthread_exit",
-		"complete_and_exit",
+		"kthread_complete_and_exit",
 		"__reiserfs_panic",
 		"lbug_with_loc",
 		"fortify_panic",