mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 08:38:45 +02:00 
			
		
		
		
	sched: run_posix_cpu_timers: Don't check ->exit_state, use lock_task_sighand()
run_posix_cpu_timers() doesn't work if current has already passed
exit_notify(). The ->exit_state check was needed to prevent races with do_wait().
Since ea6d290c ->signal is always valid and can't go away. We can
remove the "tsk->exit_state == 0" in fastpath_timer_check() and
convert run_posix_cpu_timers() to use lock_task_sighand().
Note: it makes sense to take group_leader's sighand instead, since the
sub-thread still uses CPU after release_task(). But we need more
changes to do this.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100610231018.GA25942@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
			
			
This commit is contained in:
		
							parent
							
								
									bfac700918
								
							
						
					
					
						commit
						0bdd2ed413
					
				
					 1 changed file with 4 additions and 6 deletions
				
			
		|  | @ -1272,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk) | ||||||
| { | { | ||||||
| 	struct signal_struct *sig; | 	struct signal_struct *sig; | ||||||
| 
 | 
 | ||||||
| 	/* tsk == current, ensure it is safe to use ->signal/sighand */ |  | ||||||
| 	if (unlikely(tsk->exit_state)) |  | ||||||
| 		return 0; |  | ||||||
| 
 |  | ||||||
| 	if (!task_cputime_zero(&tsk->cputime_expires)) { | 	if (!task_cputime_zero(&tsk->cputime_expires)) { | ||||||
| 		struct task_cputime task_sample = { | 		struct task_cputime task_sample = { | ||||||
| 			.utime = tsk->utime, | 			.utime = tsk->utime, | ||||||
|  | @ -1308,6 +1304,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) | ||||||
| { | { | ||||||
| 	LIST_HEAD(firing); | 	LIST_HEAD(firing); | ||||||
| 	struct k_itimer *timer, *next; | 	struct k_itimer *timer, *next; | ||||||
|  | 	unsigned long flags; | ||||||
| 
 | 
 | ||||||
| 	BUG_ON(!irqs_disabled()); | 	BUG_ON(!irqs_disabled()); | ||||||
| 
 | 
 | ||||||
|  | @ -1318,7 +1315,8 @@ void run_posix_cpu_timers(struct task_struct *tsk) | ||||||
| 	if (!fastpath_timer_check(tsk)) | 	if (!fastpath_timer_check(tsk)) | ||||||
| 		return; | 		return; | ||||||
| 
 | 
 | ||||||
| 	spin_lock(&tsk->sighand->siglock); | 	if (!lock_task_sighand(tsk, &flags)) | ||||||
|  | 		return; | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Here we take off tsk->signal->cpu_timers[N] and | 	 * Here we take off tsk->signal->cpu_timers[N] and | ||||||
| 	 * tsk->cpu_timers[N] all the timers that are firing, and | 	 * tsk->cpu_timers[N] all the timers that are firing, and | ||||||
|  | @ -1340,7 +1338,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) | ||||||
| 	 * that gets the timer lock before we do will give it up and | 	 * that gets the timer lock before we do will give it up and | ||||||
| 	 * spin until we've taken care of that timer below. | 	 * spin until we've taken care of that timer below. | ||||||
| 	 */ | 	 */ | ||||||
| 	spin_unlock(&tsk->sighand->siglock); | 	unlock_task_sighand(tsk, &flags); | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Now that all the timers on our list have the firing flag, | 	 * Now that all the timers on our list have the firing flag, | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Oleg Nesterov
						Oleg Nesterov