forked from mirrors/linux
		
	move exit_task_work() past exit_files() et.al.
... and get rid of PF_EXITING check in task_work_add(). Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
		
							parent
							
								
									67d1214551
								
							
						
					
					
						commit
						ed3e694d78
					
				
					 2 changed files with 13 additions and 23 deletions
				
			
		|  | @ -953,14 +953,11 @@ void do_exit(long code) | |||
| 	exit_signals(tsk);  /* sets PF_EXITING */ | ||||
| 	/*
 | ||||
| 	 * tsk->flags are checked in the futex code to protect against | ||||
| 	 * an exiting task cleaning up the robust pi futexes, and in | ||||
| 	 * task_work_add() to avoid the race with exit_task_work(). | ||||
| 	 * an exiting task cleaning up the robust pi futexes. | ||||
| 	 */ | ||||
| 	smp_mb(); | ||||
| 	raw_spin_unlock_wait(&tsk->pi_lock); | ||||
| 
 | ||||
| 	exit_task_work(tsk); | ||||
| 
 | ||||
| 	if (unlikely(in_atomic())) | ||||
| 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", | ||||
| 				current->comm, task_pid_nr(current), | ||||
|  | @ -995,6 +992,7 @@ void do_exit(long code) | |||
| 	exit_shm(tsk); | ||||
| 	exit_files(tsk); | ||||
| 	exit_fs(tsk); | ||||
| 	exit_task_work(tsk); | ||||
| 	check_stack_usage(); | ||||
| 	exit_thread(); | ||||
| 
 | ||||
|  |  | |||
|  | @ -5,34 +5,26 @@ | |||
| int | ||||
| task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) | ||||
| { | ||||
| 	struct callback_head *last, *first; | ||||
| 	unsigned long flags; | ||||
| 	int err = -ESRCH; | ||||
| 
 | ||||
| #ifndef TIF_NOTIFY_RESUME | ||||
| 	if (notify) | ||||
| 		return -ENOTSUPP; | ||||
| #endif | ||||
| 	/*
 | ||||
| 	 * We must not insert the new work if the task has already passed | ||||
| 	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait() | ||||
| 	 * and check PF_EXITING under pi_lock. | ||||
| 	 * Not inserting the new work if the task has already passed | ||||
| 	 * exit_task_work() is the responsibility of callers. | ||||
| 	 */ | ||||
| 	raw_spin_lock_irqsave(&task->pi_lock, flags); | ||||
| 	if (likely(!(task->flags & PF_EXITING))) { | ||||
| 		struct callback_head *last = task->task_works; | ||||
| 		struct callback_head *first = last ? last->next : twork; | ||||
| 	last = task->task_works; | ||||
| 	first = last ? last->next : twork; | ||||
| 	twork->next = first; | ||||
| 	if (last) | ||||
| 		last->next = twork; | ||||
| 	task->task_works = twork; | ||||
| 		err = 0; | ||||
| 	} | ||||
| 	raw_spin_unlock_irqrestore(&task->pi_lock, flags); | ||||
| 
 | ||||
| 	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */ | ||||
| 	if (likely(!err) && notify) | ||||
| 	if (notify) | ||||
| 		set_notify_resume(task); | ||||
| 	return err; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| struct callback_head * | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Al Viro
						Al Viro