Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)

	sched/fair: Fix enqueue_task_fair warning
When a cfs_rq is throttled, it and its children are removed from the leaf list, but their nr_running is not changed and can stay higher than 1. When a task is enqueued in this throttled branch, the cfs_rqs must be added back to the list to keep it correctly ordered, but this currently only happens when nr_running == 1. When cfs bandwidth is used, call list_add_leaf_cfs_rq() unconditionally when enqueuing an entity to make sure that the complete branch is added.

Similarly, unthrottle_cfs_rq() can stop adding cfs_rqs to the list when a parent is throttled. Iterate over the remaining entities to ensure that the complete branch is added to the list.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: stable@vger.kernel.org
Cc: stable@vger.kernel.org #v5.1+
Link: https://lkml.kernel.org/r/20200306135257.25044-1-vincent.guittot@linaro.org
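For context, the sketch below is a minimal userspace model of the scenario described above, not kernel code: the struct and helper names (cfs_rq, list_add_leaf_cfs_rq, nr_running, on_list) are simplified stand-ins, and throttle_branch()/enqueue() only mimic the relevant bookkeeping. It shows why re-adding a cfs_rq to the leaf list only when nr_running == 1 can leave a previously throttled group off the list, and how an unconditional add when bandwidth control is in use repairs that.

#include <stdio.h>
#include <stdbool.h>

/*
 * Minimal userspace model of the leaf-list bookkeeping this patch fixes.
 * Names mirror the kernel's, but every struct and helper here is a
 * simplified stand-in, not actual kernel code.
 */
struct cfs_rq {
	const char *name;
	int nr_running;		/* runnable entities; survives a throttle */
	bool on_list;		/* "is on the rq's leaf cfs_rq list" */
	struct cfs_rq *parent;
};

static const bool bandwidth_used = true;	/* cfs bandwidth control in use */

static void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->on_list = true;
}

/* Throttling removes the group and its children from the leaf list... */
static void throttle_branch(struct cfs_rq **rqs, int n)
{
	for (int i = 0; i < n; i++)
		rqs[i]->on_list = false;	/* ...but nr_running is left as-is */
}

/* Enqueue one task into @leaf and walk up the hierarchy. */
static void enqueue(struct cfs_rq *leaf, bool fixed)
{
	for (struct cfs_rq *cfs_rq = leaf; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->nr_running++;
		/* old behaviour: only "== 1" triggers the re-add */
		if (cfs_rq->nr_running == 1 || (fixed && bandwidth_used))
			list_add_leaf_cfs_rq(cfs_rq);
	}
}

int main(void)
{
	struct cfs_rq root  = { "root",  0, false, NULL  };
	struct cfs_rq group = { "group", 0, false, &root };
	struct cfs_rq *branch[] = { &group, &root };

	for (int fixed = 0; fixed <= 1; fixed++) {
		/* both groups already have runnable tasks and sit on the list */
		group.nr_running = root.nr_running = 2;
		group.on_list = root.on_list = true;

		throttle_branch(branch, 2);	/* group gets throttled */
		enqueue(&group, fixed);		/* a task wakes up in the throttled branch */

		printf("%s: %s on_list=%d, nr_running=%d\n",
		       fixed ? "fixed" : "old  ",
		       group.name, group.on_list, group.nr_running);
	}
	return 0;
}

With the old condition the model reports the group still off the list after the enqueue (nr_running went from 2 to 3, so the "== 1" check never fires); with the fix it is re-added. This mirrors what the hunks below do in enqueue_entity() and, for the unthrottle path, in unthrottle_cfs_rq().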
parent 14533a16c4
commit fe61468b2c

1 changed file with 22 additions and 4 deletions
kernel/sched/fair.c

@@ -4136,6 +4136,7 @@ static inline void check_schedstat_required(void)
 #endif
 }
 
+static inline bool cfs_bandwidth_used(void);
 
 /*
  * MIGRATION
@@ -4214,10 +4215,16 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1) {
+	/*
+	 * When bandwidth control is enabled, cfs might have been removed
+	 * because of a parent been throttled but cfs->nr_running > 1. Try to
+	 * add it unconditionnally.
+	 */
+	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
 		list_add_leaf_cfs_rq(cfs_rq);
+
+	if (cfs_rq->nr_running == 1)
 		check_enqueue_throttle(cfs_rq);
-	}
 }
 
 static void __clear_buddies_last(struct sched_entity *se)
@@ -4808,11 +4815,22 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			break;
 	}
 
-	assert_list_leaf_cfs_rq(rq);
-
 	if (!se)
 		add_nr_running(rq, task_delta);
 
+	/*
+	 * The cfs_rq_throttled() breaks in the above iteration can result in
+	 * incomplete leaf list maintenance, resulting in triggering the
+	 * assertion below.
+	 */
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		list_add_leaf_cfs_rq(cfs_rq);
+	}
+
+	assert_list_leaf_cfs_rq(rq);
+
 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);