mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	sched/pelt: Fix task util_est update filtering
Being called for each dequeue, util_est reduces the number of its updates by filtering out the case where the EWMA signal differs from the task util_avg by less than 1%. This is a problem for a sudden util_avg ramp-up: due to the decay from a previous high util_avg, the EWMA might now be close enough to the new util_avg, so no update happens and ue.enqueued is left with an out-of-date value.

Taking both util_est members, EWMA and enqueued, into consideration for the filtering ensures an up-to-date value for both.

This is for now an issue only for the trace probe, which might return the stale value. Functional-wise it isn't a problem, as the value is always accessed through max(enqueued, ewma).

This problem has been observed using LISA's UtilConvergence:test_means on the sd845c board. No regression observed with Hackbench on sd845c and Perf-bench sched pipe on hikey/hikey960.

Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210225165820.1377125-1-vincent.donnefort@arm.com
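For readers following the reasoning, below is a minimal standalone sketch of the dual-margin filter this patch introduces. It is not the kernel code itself: struct util_est_sketch, can_skip_update() and the example values are illustrative assumptions, and within_margin() is written in a simplified form. The skip condition, however, mirrors the patched util_est_update(): an update is filtered out only when both the EWMA and the enqueued member are within ~1% of the freshly observed task utilization.

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UTIL_EST_MARGIN		(SCHED_CAPACITY_SCALE / 100)

/* Simplified stand-in for the kernel's struct util_est. */
struct util_est_sketch {
	unsigned int enqueued;	/* utilization at the last activation */
	unsigned int ewma;	/* smoothed history of past activations */
};

/* abs(value) < margin, written plainly for the sketch. */
static bool within_margin(long value, unsigned int margin)
{
	return value > -(long)margin && value < (long)margin;
}

/*
 * True when the util_est update can be skipped entirely: both members
 * already track the new task utilization to within ~1% of the scale.
 */
static bool can_skip_update(struct util_est_sketch ue, unsigned int task_util)
{
	long ewma_diff = (long)task_util - (long)ue.ewma;
	long enqueued_diff = (long)ue.enqueued - (long)task_util;

	return within_margin(ewma_diff, UTIL_EST_MARGIN) &&
	       within_margin(enqueued_diff, UTIL_EST_MARGIN);
}

int main(void)
{
	/* Decayed EWMA close to the new utilization, but enqueued is stale. */
	struct util_est_sketch ue = { .enqueued = 60, .ewma = 305 };
	unsigned int task_util = 300;

	/*
	 * Checking only the EWMA diff (the pre-patch filter) would skip
	 * here; checking the stale enqueued member as well forces an update.
	 */
	printf("skip update: %s\n", can_skip_update(ue, task_util) ? "yes" : "no");
	return 0;
}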
This commit is contained in:
parent 39a2a6eb5c
commit b89997aa88

1 changed file with 12 additions and 3 deletions
kernel/sched/fair.c

@@ -3941,6 +3941,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
 	trace_sched_util_est_cfs_tp(cfs_rq);
 }
 
+#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
+
 /*
  * Check if a (signed) value is within a specified (unsigned) margin,
  * based on the observation that:
@@ -3958,7 +3960,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 				   struct task_struct *p,
 				   bool task_sleep)
 {
-	long last_ewma_diff;
+	long last_ewma_diff, last_enqueued_diff;
 	struct util_est ue;
 
 	if (!sched_feat(UTIL_EST))
@@ -3979,6 +3981,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	if (ue.enqueued & UTIL_AVG_UNCHANGED)
 		return;
 
+	last_enqueued_diff = ue.enqueued;
+
 	/*
 	 * Reset EWMA on utilization increases, the moving average is used only
 	 * to smooth utilization decreases.
@@ -3992,12 +3996,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	}
 
 	/*
-	 * Skip update of task's estimated utilization when its EWMA is
+	 * Skip update of task's estimated utilization when its members are
 	 * already ~1% close to its last activation value.
 	 */
 	last_ewma_diff = ue.enqueued - ue.ewma;
-	if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
+	last_enqueued_diff -= ue.enqueued;
+	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
+		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
+			goto done;
+
 		return;
+	}
 
 	/*
 	 * To avoid overestimation of actual task utilization, skip updates if
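The within_margin() helper used in the hunks above avoids an explicit abs() by relying on a single unsigned comparison. The sketch below is a hedged illustration of that trick, assuming the helper keeps the unsigned-comparison form described by the comment block the first hunk touches ("abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)"); with SCHED_CAPACITY_SCALE at 1024, UTIL_EST_MARGIN evaluates to 1024 / 100 = 10.

#include <assert.h>
#include <stdbool.h>

/* SCHED_CAPACITY_SCALE is 1024, so the ~1% margin evaluates to 10. */
#define UTIL_EST_MARGIN	(1024 / 100)

/*
 * abs(value) < margin, done with one unsigned comparison:
 *   abs(x) < y  <=>  (unsigned)(x + y - 1) < (2 * y - 1)
 * Values at or below -margin wrap around to large unsigned numbers
 * and fail the comparison. Only valid while value + margin stays
 * below INT_MAX.
 */
static bool within_margin(int value, int margin)
{
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

int main(void)
{
	assert(within_margin(0, UTIL_EST_MARGIN));	/* |0|  < 10 */
	assert(within_margin(9, UTIL_EST_MARGIN));	/* |9|  < 10 */
	assert(within_margin(-9, UTIL_EST_MARGIN));	/* |-9| < 10 */
	assert(!within_margin(10, UTIL_EST_MARGIN));	/* |10| not < 10 */
	assert(!within_margin(-240, UTIL_EST_MARGIN));	/* stale enqueued case */
	return 0;
}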