mirror of
https://github.com/torvalds/linux.git
sched/pelt: Relax the sync of load_sum with load_avg
Similarly to util_avg and util_sum, don't sync load_sum with the low bound of load_avg but only ensure that load_sum stays in the correct range.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lkml.kernel.org/r/20220111134659.24961-5-vincent.guittot@linaro.org
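To make "stays in the correct range" concrete, here is a minimal userspace sketch (not kernel code) of the subtract-then-clamp pattern this patch applies to load_sum. The remove_load() helper and its flat unsigned long types are assumptions for the example; LOAD_AVG_MAX and PELT_MIN_DIVIDER mirror the kernel's PELT constants.

#include <stdio.h>

#define LOAD_AVG_MAX     47742                  /* max of the PELT geometric series */
#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)  /* smallest divider PELT can use */

/* Like the kernel's sub_positive(): subtract without underflowing. */
static void sub_positive(unsigned long *ptr, unsigned long val)
{
        *ptr = (*ptr > val) ? *ptr - val : 0;
}

/* Hypothetical helper contrasting the old and new handling of load_sum. */
static void remove_load(unsigned long *load_avg, unsigned long *load_sum,
                        unsigned long removed_avg, unsigned long removed_sum)
{
        sub_positive(load_avg, removed_avg);

        /*
         * Old scheme: *load_sum = *load_avg * divider; -- this snaps
         * load_sum to the lowest value consistent with load_avg and
         * throws away what the sum accumulated in the current period.
         *
         * New scheme: subtract what was actually removed ...
         */
        sub_positive(load_sum, removed_sum);

        /* ... then only enforce the lower bound of the valid range. */
        if (*load_sum < *load_avg * PELT_MIN_DIVIDER)
                *load_sum = *load_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
        unsigned long avg = 1024, sum = 1024UL * LOAD_AVG_MAX;

        remove_load(&avg, &sum, 512, 512UL * LOAD_AVG_MAX);
        printf("avg=%lu sum=%lu floor=%lu\n", avg, sum, avg * PELT_MIN_DIVIDER);
        return 0;
}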
This commit is contained in:
parent 95246d1ec8
commit 2d02fa8cc2
1 changed file with 22 additions and 14 deletions
kernel/sched/fair.c

@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u32 divider = get_pelt_divider(&se->avg);
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
@@ -3513,9 +3515,10 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 	unsigned long load_avg;
 	u64 load_sum = 0;
+	s64 delta_sum;
 	u32 divider;
 
 	if (!runnable_sum)
@@ -3542,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	 * assuming all tasks are equally runnable.
 	 */
 	if (scale_load_down(gcfs_rq->load.weight)) {
-		load_sum = div_s64(gcfs_rq->avg.load_sum,
+		load_sum = div_u64(gcfs_rq->avg.load_sum,
 			scale_load_down(gcfs_rq->load.weight));
 	}
 
@@ -3559,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
 	runnable_sum = max(runnable_sum, running_sum);
 
-	load_sum = (s64)se_weight(se) * runnable_sum;
-	load_avg = div_s64(load_sum, divider);
+	load_sum = se_weight(se) * runnable_sum;
+	load_avg = div_u64(load_sum, divider);
 
-	se->avg.load_sum = runnable_sum;
-
-	delta = load_avg - se->avg.load_avg;
-	if (!delta)
+	delta_avg = load_avg - se->avg.load_avg;
+	if (!delta_avg)
 		return;
 
-	se->avg.load_avg = load_avg;
+	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
 
-	add_positive(&cfs_rq->avg.load_avg, delta);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	se->avg.load_sum = runnable_sum;
+	se->avg.load_avg = load_avg;
+	add_positive(&cfs_rq->avg.load_avg, delta_avg);
+	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3687,7 +3693,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 		r = removed_load;
 		sub_positive(&sa->load_avg, r);
-		sa->load_sum = sa->load_avg * divider;
+		sub_positive(&sa->load_sum, r * divider);
+		/* See sa->util_sum below */
+		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
 
 		r = removed_util;
 		sub_positive(&sa->util_avg, r);
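For context on the new max_t() lower bound: the kernel derives its divider as PELT_MIN_DIVIDER plus sa->period_contrib, and period_contrib always stays below 1024, so PELT_MIN_DIVIDER is the smallest divider that can ever relate a *_sum to a *_avg. A short illustrative sketch of that relationship (the helper below is a stand-in, not the kernel function):

#define LOAD_AVG_MAX     47742
#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

/*
 * Illustrative stand-in for get_pelt_divider(); the real one reads
 * sa->period_contrib, which is always in [0, 1024).
 */
static unsigned int pelt_divider(unsigned int period_contrib)
{
        return PELT_MIN_DIVIDER + period_contrib;
}

Clamping load_sum to at least load_avg * PELT_MIN_DIVIDER is therefore the weakest bound that keeps the pair consistent, which is why the patch enforces only that floor instead of re-deriving load_sum from load_avg and discarding the accurately tracked sum.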