Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00
	sched/core: Fix endless loop in pick_next_task()
1) Single CPU machine case.

When the rq has only RT tasks, but none of them can be picked
because of throttling, we enter an endless loop:
pick_next_task_{dl,rt}() return NULL, and in pick_next_task_fair()
we permanently hit the retry path

	if (rq->nr_running != rq->cfs.h_nr_running)
		return RETRY_TASK;

(rq->nr_running is not decremented when an rt_rq becomes
throttled).

There is no chance to unthrottle any rt_rq or to wake up a fair
task here, because the rq is locked the whole time and interrupts
are disabled.

2) In the SMP case this can cause a hang too. Although we unlock
   the rq in idle_balance(), interrupts are still disabled.

The solution is to check for runnable tasks in the DL and RT
classes individually, instead of checking the aggregate
nr_running sum.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394098321.19290.11.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
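For context, a simplified sketch of the class-iteration loop in pick_next_task() (kernel/sched/core.c of that era, abridged rather than quoted verbatim) shows how the lock-up closes: the higher classes return NULL, the fair class keeps returning RETRY_TASK, and nothing can ever change state because rq->lock is held and interrupts are off:

	again:
		for_each_class(class) {
			p = class->pick_next_task(rq, prev);	/* dl: NULL, rt: NULL (throttled) */
			if (p) {
				if (unlikely(p == RETRY_TASK))	/* fair: RETRY_TASK, every time */
					goto again;
				return p;
			}
		}
		/* the idle class is never reached: we always bounce back to 'again' */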
			
			
This commit is contained in:

parent e4aa358b6c
commit 4c6c4e38c4

3 changed files with 15 additions and 11 deletions
kernel/sched/fair.c

@@ -6728,7 +6728,9 @@ static int idle_balance(struct rq *this_rq)
 
 out:
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
+	    (this_rq->dl.dl_nr_running ||
+	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
 	if (pulled_task) {
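The pulled_task = -1 value set here is what pick_next_task_fair() turns into RETRY_TASK. As a rough sketch of the consuming side (the idle path added by the parent commit e4aa358b6c, abridged rather than quoted verbatim):

	idle:
		new_tasks = idle_balance(rq);

		/*
		 * idle_balance() drops and re-takes rq->lock, so a task of a
		 * higher priority class may have appeared; restart the pick.
		 */
		if (new_tasks < 0)
			return RETRY_TASK;

		if (new_tasks > 0)
			goto again;

		return NULL;

With the change above, -1 is only reported when the DL or RT class actually has something runnable (a queued deadline task or an unthrottled rt_rq), so the resulting retry can make progress.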
kernel/sched/rt.c

@@ -470,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -545,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
kernel/sched/sched.h

@@ -423,6 +423,18 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+#endif
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
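As a side note on the pre-condition in point 1): rt_throttled is set once the RT class exhausts its bandwidth budget for the current period (by default kernel.sched_rt_runtime_us = 950000 out of kernel.sched_rt_period_us = 1000000). A hypothetical user-space illustration of a workload that leaves an rq with nothing runnable but a throttled rt_rq is a busy-looping SCHED_FIFO task on an otherwise idle CPU (needs CAP_SYS_NICE; the file name and priority are arbitrary, and this program is not part of the patch):

	/* rt_spin.c -- hypothetical illustration, not part of the kernel change. */
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 50 };

		if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
			perror("sched_setscheduler");
			return 1;
		}

		for (;;)
			;	/* hog the CPU; the rt_rq throttles after ~950 ms of each 1 s period */
	}

On a fixed kernel the CPU simply idles (or runs fair tasks) during the throttled part of each period; before this fix, a single-CPU machine whose only runnable task was such a throttled RT hog could spin forever inside pick_next_task().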