Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	sched/fair: Reorder enqueue/dequeue_task_fair path
The walk through the cgroup hierarchy during the enqueue/dequeue of a task is split into two distinct parts for a throttled cfs_rq, which adds no value and only makes the code less readable. Change the code ordering so that everything related to a cfs_rq (throttled or not) is done in the same loop.

In addition, the same ordering of steps is now used when updating a cfs_rq:

- update_load_avg
- update_cfs_group
- update *h_nr_running

This reordering enables the use of h_nr_running in the PELT algorithm.

No functional or performance changes are expected, and none were noticed during tests.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Link: https://lore.kernel.org/r/20200224095223.13361-5-mgorman@techsingularity.net
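The control-flow change is easiest to see in isolation. Below is a minimal, self-contained C sketch of the pattern the patch introduces; it is not kernel code, and the toy_cfs_rq/hier/rq_nr_running names and the flat array standing in for the cgroup hierarchy are invented for illustration. Each level is now fully updated before the throttle check, and the early exit jumps to a label shared with the normal exit instead of break-ing:

        #include <stdbool.h>
        #include <stdio.h>

        #define LEVELS 3

        /* Stand-in for a cfs_rq: a runnable-task counter plus a throttled flag. */
        struct toy_cfs_rq {
                int h_nr_running;
                bool throttled;
        };

        static struct toy_cfs_rq hier[LEVELS];  /* index 0 = leaf, LEVELS-1 = root */
        static int rq_nr_running;
        static bool walk_complete;

        static void toy_enqueue(void)
        {
                int i;

                for (i = 0; i < LEVELS; i++) {
                        /* Update this level first (update_load_avg()/update_cfs_group()
                         * would go here in the kernel), then bump the counter ... */
                        hier[i].h_nr_running++;

                        /* ... so a throttled level is already accounted for when the
                         * walk stops. 'goto' replaces 'break' so that both loops in
                         * the real code can share this single exit path. */
                        if (hier[i].throttled)
                                goto enqueue_throttle;
                }
                walk_complete = true;   /* plays the role of 'se == NULL' */

        enqueue_throttle:
                /* Common tail: only a full walk adds to the root runqueue count,
                 * mirroring 'if (!se) add_nr_running(rq, 1)'. */
                if (walk_complete)
                        rq_nr_running++;
        }

        int main(void)
        {
                hier[1].throttled = true;       /* throttle the middle level */
                toy_enqueue();
                printf("rq=%d l0=%d l1=%d l2=%d\n", rq_nr_running,
                       hier[0].h_nr_running, hier[1].h_nr_running,
                       hier[2].h_nr_running);
                /* prints: rq=0 l0=1 l1=1 l2=0 */
                return 0;
        }

With the old break-based flow, a throttled cfs_rq left the first loop before its h_nr_running was touched, and the second loop had to "post the final h_nr_running increment below", as the deleted comment put it; updating the counters before the check removes that special case.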
This commit is contained in:

parent b2b2042b20
commit 6d4d22468d

1 changed file with 20 additions and 22 deletions
kernel/sched/fair.c

@@ -5260,32 +5260,31 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, flags);
 
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running increment below.
-		 */
-		if (cfs_rq_throttled(cfs_rq))
-			break;
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto enqueue_throttle;
+
 		flags = ENQUEUE_WAKEUP;
 	}
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running++;
-		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
-		/* end evaluation on encountering a throttled cfs_rq */
-		if (cfs_rq_throttled(cfs_rq))
-			break;
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		update_cfs_group(se);
 
-		update_load_avg(cfs_rq, se, UPDATE_TG);
-		update_cfs_group(se);
+		cfs_rq->h_nr_running++;
+		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto enqueue_throttle;
 	}
 
+enqueue_throttle:
 	if (!se) {
 		add_nr_running(rq, 1);
 		/*
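For readability, here is how the enqueue path reads once the hunk is applied, assembled from the context and '+' lines above (code outside the hunk is elided with /* ... */ comments):

        for_each_sched_entity(se) {
                /* ... */
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, flags);

                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;

                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;

                flags = ENQUEUE_WAKEUP;
        }

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);

                update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_group(se);

                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;

                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
        }

enqueue_throttle:
        if (!se) {
                add_nr_running(rq, 1);
                /* ... */
        }

The second loop now shows the uniform per-cfs_rq ordering the changelog calls out: update_load_avg(), then update_cfs_group(), then the *h_nr_running accounting, with the throttle check last.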
@@ -5346,17 +5345,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
 
-		/*
-		 * end evaluation on encountering a throttled cfs_rq
-		 *
-		 * note: in the case of encountering a throttled cfs_rq we will
-		 * post the final h_nr_running decrement below.
-		*/
-		if (cfs_rq_throttled(cfs_rq))
-			break;
 		cfs_rq->h_nr_running--;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto dequeue_throttle;
+
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5374,16 +5369,19 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running--;
-		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
-		/* end evaluation on encountering a throttled cfs_rq */
-		if (cfs_rq_throttled(cfs_rq))
-			break;
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		update_cfs_group(se);
 
-		update_load_avg(cfs_rq, se, UPDATE_TG);
-		update_cfs_group(se);
+		cfs_rq->h_nr_running--;
+		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto dequeue_throttle;
 	}
 
+dequeue_throttle:
 	if (!se)
 		sub_nr_running(rq, 1);
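Assembled the same way, the dequeue side after the two hunks above mirrors the enqueue path, down to the shared dequeue_throttle label:

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, flags);

                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;

                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;

                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
                        /* Avoid re-evaluating load for this entity: */
                        /* ... */
                }
                /* ... */
        }

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);

                update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_group(se);

                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;

                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;
        }

dequeue_throttle:
        if (!se)
                sub_nr_running(rq, 1);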