	module: Remove stop_machine from module unloading
Remove stop_machine from module unloading by adding a new reference counting algorithm. The atomic refcounter works like a semaphore: it can only be taken (incremented) while the counter is not 0. When loading a module, the module subsystem sets the counter to MODULE_REF_BASE (= 1), and when unloading the module it subtracts MODULE_REF_BASE from the counter. If no one refers to the module, the refcounter becomes 0 and the module can be removed safely. If someone does refer to it, we try to recover the counter by adding MODULE_REF_BASE back, unless the counter has already become 0, because the referrer may put the module right before the recovery. If the recovery fails, the refcount is 0 and can never be incremented again, so the module can be removed safely in that case as well.

Note that __module_get() forcibly takes a module reference; users should use try_module_get() instead.

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
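To make the counting rules concrete, here is a minimal userspace sketch of the scheme described above. It is only an analogue: the names below (struct mod, mod_try_get, mod_put, mod_try_release) are invented for this illustration, and C11 atomics stand in for the kernel's atomic_inc_not_zero()/atomic_sub_return()/atomic_add_unless() helpers; it is not the kernel implementation.

/*
 * Illustrative userspace model only -- not the kernel code.  The type and
 * function names (struct mod, mod_try_get, mod_put, mod_try_release) are
 * made up for this sketch; C11 atomics approximate the kernel helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MODULE_REF_BASE	1

struct mod {
	atomic_int refcnt;
};

/* Loader sets the counter to MODULE_REF_BASE; 0 means "module is going". */
static void mod_init(struct mod *m)
{
	atomic_init(&m->refcnt, MODULE_REF_BASE);
}

/* Semaphore-like get: succeeds only while the counter is non-zero. */
static bool mod_try_get(struct mod *m)
{
	int old = atomic_load(&m->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&m->refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* module is going away */
}

/* Drop a reference (the kernel additionally warns on underflow). */
static void mod_put(struct mod *m)
{
	atomic_fetch_sub(&m->refcnt, 1);
}

/*
 * Unloader: subtract MODULE_REF_BASE.  If users remain, try to restore the
 * base count -- but only if the counter has not meanwhile dropped to 0,
 * because a 0 counter must never be incremented again.
 * Returns true when the module may be removed.
 */
static bool mod_try_release(struct mod *m)
{
	int ret = atomic_fetch_sub(&m->refcnt, MODULE_REF_BASE) - MODULE_REF_BASE;

	while (ret != 0) {
		if (atomic_compare_exchange_weak(&m->refcnt, &ret,
						 ret + MODULE_REF_BASE))
			return false;	/* still in use, base count restored */
	}
	return true;			/* counter is 0: safe to remove */
}

int main(void)
{
	struct mod m;

	mod_init(&m);
	if (mod_try_get(&m)) {			/* a user holds a reference */
		printf("unload while in use: %s\n",
		       mod_try_release(&m) ? "removed" : "kept");
		mod_put(&m);			/* last user goes away */
	}
	printf("unload when unused:   %s\n",
	       mod_try_release(&m) ? "removed" : "kept");
	return 0;
}

The interesting case is the recovery in mod_try_release(): a concurrent put may drop the counter to 0 between the subtraction and the re-add, and once the counter reaches 0 it must stay 0, so exactly one path (here the unloader) ends up removing the module.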
parent 2f35c41f58
commit e513cc1c07

1 changed file with 40 additions and 29 deletions
@@ -42,7 +42,6 @@
 #include <linux/vermagic.h>
 #include <linux/notifier.h>
 #include <linux/sched.h>
-#include <linux/stop_machine.h>
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/mutex.h>
@@ -98,7 +97,7 @@
  * 1) List of modules (also safely readable with preempt_disable),
  * 2) module_use links,
  * 3) module_addr_min/module_addr_max.
- * (delete uses stop_machine/add uses RCU list operations). */
+ * (delete and add uses RCU list operations). */
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
@@ -628,14 +627,23 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 
 EXPORT_TRACEPOINT_SYMBOL(module_get);
 
+/* MODULE_REF_BASE is the base reference count by kmodule loader. */
+#define MODULE_REF_BASE	1
+
 /* Init the unload section of the module. */
 static int module_unload_init(struct module *mod)
 {
+	/*
+	 * Initialize reference counter to MODULE_REF_BASE.
+	 * refcnt == 0 means module is going.
+	 */
+	atomic_set(&mod->refcnt, MODULE_REF_BASE);
+
 	INIT_LIST_HEAD(&mod->source_list);
 	INIT_LIST_HEAD(&mod->target_list);
 
 	/* Hold reference count during initialization. */
-	atomic_set(&mod->refcnt, 1);
+	atomic_inc(&mod->refcnt);
 
 	return 0;
 }
@@ -734,39 +742,39 @@ static inline int try_force_unload(unsigned int flags)
 }
 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
 
-struct stopref
+/* Try to release refcount of module, 0 means success. */
+static int try_release_module_ref(struct module *mod)
 {
-	struct module *mod;
-	int flags;
-	int *forced;
-};
+	int ret;
 
-/* Whole machine is stopped with interrupts off when this runs. */
-static int __try_stop_module(void *_sref)
-{
-	struct stopref *sref = _sref;
+	/* Try to decrement refcnt which we set at loading */
+	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
+	BUG_ON(ret < 0);
+	if (ret)
+		/* Someone can put this right now, recover with checking */
+		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
 
-	/* If it's not unused, quit unless we're forcing. */
-	if (module_refcount(sref->mod) != 0) {
-		if (!(*sref->forced = try_force_unload(sref->flags)))
-			return -EWOULDBLOCK;
-	}
-
-	/* Mark it as dying. */
-	sref->mod->state = MODULE_STATE_GOING;
-	return 0;
+	return ret;
 }
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-	struct stopref sref = { mod, flags, forced };
+	/* If it's not unused, quit unless we're forcing. */
+	if (try_release_module_ref(mod) != 0) {
+		*forced = try_force_unload(flags);
+		if (!(*forced))
+			return -EWOULDBLOCK;
+	}
 
-	return stop_machine(__try_stop_module, &sref, NULL);
+	/* Mark it as dying. */
+	mod->state = MODULE_STATE_GOING;
+
+	return 0;
 }
 
 unsigned long module_refcount(struct module *mod)
 {
-	return (unsigned long)atomic_read(&mod->refcnt);
+	return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -921,11 +929,11 @@ bool try_module_get(struct module *module)
 
 	if (module) {
 		preempt_disable();
-
-		if (likely(module_is_live(module))) {
-			atomic_inc(&module->refcnt);
+		/* Note: here, we can fail to get a reference */
+		if (likely(module_is_live(module) &&
+			   atomic_inc_not_zero(&module->refcnt) != 0))
 			trace_module_get(module, _RET_IP_);
-		} else
+		else
 			ret = false;
 
 		preempt_enable();
@@ -936,9 +944,12 @@ EXPORT_SYMBOL(try_module_get);
 
 void module_put(struct module *module)
 {
+	int ret;
+
 	if (module) {
 		preempt_disable();
-		atomic_dec(&module->refcnt);
+		ret = atomic_dec_if_positive(&module->refcnt);
+		WARN_ON(ret < 0);	/* Failed to put refcount */
 		trace_module_put(module, _RET_IP_);
 		preempt_enable();
 	}