mirror of https://github.com/torvalds/linux.git
	[PATCH] "Fix uidhash_lock <-> RXU deadlock" fix
I get storms of warnings from local_bh_enable(). Better-tested patches, please.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b8c475be7b
commit 3fa97c9db4

1 changed file with 17 additions and 10 deletions (kernel/user.c)
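The failure mode in a nutshell: the old free_uid() bracketed its work with local_bh_disable()/local_bh_enable(), but free_uid() can be reached with local interrupts already disabled, and local_bh_enable() in that state is an error: it may run pending softirq callbacks, which can unconditionally re-enable interrupts behind the caller's back. A minimal kernel-style sketch of the bad interaction (the caller below is hypothetical; the free_uid() body is the pre-patch shape taken from the hunks that follow):

/* Pre-patch free_uid(), as removed by the diff below. */
void free_uid(struct user_struct *up)
{
	local_bh_disable();
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		/* ... key_put() and kmem_cache_free() teardown ... */
		spin_unlock(&uidhash_lock);
	}
	local_bh_enable();	/* may run softirqs; invalid with IRQs off */
}

/* Hypothetical caller showing how the warnings fire. */
static void drop_uid_with_irqs_off(struct user_struct *up)
{
	unsigned long flags;

	local_irq_save(flags);		/* interrupts are now disabled */
	free_uid(up);			/* local_bh_enable() inside triggers
					 * the warning storm from the log */
	local_irq_restore(flags);
}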
@@ -33,6 +33,10 @@ static struct list_head uidhash_table[UIDHASH_SZ];
  * The uidhash_lock is mostly taken from process context, but it is
  * occasionally also taken from softirq/tasklet context, when
  * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
@@ -89,16 +93,19 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -106,7 +113,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -114,9 +121,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -146,7 +153,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock_bh(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -156,7 +163,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock_bh(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 
 	}
 	return up;
@@ -192,9 +199,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
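A note on the shape of free_uid() above: spin_unlock(&uidhash_lock) appears only inside the if-branch because atomic_dec_and_lock() acquires the lock only when the reference count actually drops to zero; on every other decrement it returns false without touching the lock. A minimal userspace analogue of that idiom, using C11 atomics and pthreads purely as an illustration (this is not the kernel's implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Decrement a refcount; return true *with the lock held* only if this
 * was the last reference -- the same contract free_uid() relies on.
 */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	/* Fast path: while more than one reference remains, decrement
	 * lock-free; no teardown can happen, so the lock is unneeded. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;
	}

	/* Slow path: possibly the last reference.  Take the lock first
	 * so the final decrement and the removal from shared structures
	 * look atomic to concurrent lookups. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* last ref: caller must unlock */
	pthread_mutex_unlock(lock);
	return false;
}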
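The "check whether we raced" comment in alloc_uid() marks the classic optimistic-allocation pattern: look up without the lock, allocate outside the lock (allocation may sleep), then re-check under the lock before inserting, so two racing allocators converge on a single user_struct. Stitching the hunks above into one readable flow (lines falling between hunks are elided, not reconstructed):

struct user_struct * alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);	/* optimistic lookup */
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		/* ... allocate and initialize 'new'; may sleep, so the
		 * lock cannot be held here (elided) ... */

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			/* ... free the losing 'new' (elided) ... */
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}

The choice of locking primitive follows the same logic throughout the patch: find_user() and free_uid() can be entered with interrupts already disabled, so they save and restore the caller's interrupt state (spin_lock_irqsave(), local_irq_save()), while alloc_uid() and uid_cache_init() run in plain process context and can use the cheaper spin_lock_irq()/spin_unlock_irq(), which disable and re-enable interrupts unconditionally.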