sched: Unify runtime accounting across classes
All classes use sched_entity::exec_start to track runtime and have
copies of the exact same code around to compute runtime.

Collapse all that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lkml.kernel.org/r/54d148a144f26d9559698c4dd82d8859038a7380.1699095159.git.bristot@kernel.org
commit 5d69eca542 (parent ee4373dc90)
6 changed files with 53 additions and 61 deletions
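Before the per-file diffs, a minimal standalone sketch of the pattern being collapsed. This is illustrative code under assumed stub types — entity_sketch, stub_clock and sketch_update_curr_se() are stand-ins for sched_entity, rq_clock_task() and the new helper, not kernel code. Each class used to open-code "now - exec_start" itself; after this commit a single helper computes the delta once, updates the entity, and hands a signed result back to the caller:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef uint64_t u64;

/* Illustrative stand-in for sched_entity's accounting fields. */
struct entity_sketch {
	u64 exec_start;       /* timestamp of the last accounting pass */
	u64 sum_exec_runtime; /* total runtime charged so far          */
};

static u64 stub_clock; /* stands in for rq_clock_task(rq) */

/* Shape of the unified helper: compute the delta once, update the
 * entity, and return the signed result for the scheduling class. */
static s64 sketch_update_curr_se(struct entity_sketch *curr)
{
	u64 now = stub_clock;
	s64 delta_exec = now - curr->exec_start;

	if (delta_exec <= 0)
		return delta_exec;

	curr->exec_start = now;
	curr->sum_exec_runtime += delta_exec;
	return delta_exec;
}

int main(void)
{
	struct entity_sketch se = { .exec_start = 100 };

	stub_clock = 350;
	printf("delta = %lld ns\n", (long long)sketch_update_curr_se(&se));
	printf("sum   = %llu ns\n", (unsigned long long)se.sum_exec_runtime);
	return 0;
}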
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -523,7 +523,7 @@ struct sched_statistics {
 	u64				block_max;
 	s64				sum_block_runtime;
 
-	u64				exec_max;
+	s64				exec_max;
 	u64				slice_max;
 
 	u64				nr_migrations_cold;
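The only change above is exec_max going from u64 to s64, matching the now-signed delta_exec. A small hedged demo (plain C with hypothetical variable names) of why a signed delta simplifies the backwards-clock guard that every copy of this code used to express as an (s64) cast:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t now = 100, start = 250;   /* task clock appears to go backwards */
	uint64_t du = now - start;         /* wraps to a huge positive value     */
	int64_t  ds = (int64_t)du;         /* reads back as -150 on two's-complement machines */

	/* With an unsigned delta, "delta <= 0" only catches zero, which is
	 * why the old code needed the explicit (s64) cast before comparing.
	 * A signed delta makes the guard direct. */
	printf("u64 delta = %llu, caught by <= 0? %s\n",
	       (unsigned long long)du, du == 0 ? "yes" : "no");
	printf("s64 delta = %lld, caught by <= 0? %s\n",
	       (long long)ds, ds <= 0 ? "yes" : "no");
	return 0;
}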
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
@@ -1275,9 +1275,8 @@ static void update_curr_dl(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_dl_entity *dl_se = &curr->dl;
-	u64 delta_exec, scaled_delta_exec;
+	s64 delta_exec, scaled_delta_exec;
 	int cpu = cpu_of(rq);
-	u64 now;
 
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
@@ -1290,21 +1289,13 @@ static void update_curr_dl(struct rq *rq)
 	 * natural solution, but the full ramifications of this
 	 * approach need further study.
 	 */
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0)) {
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0)) {
 		if (unlikely(dl_se->dl_yielded))
 			goto throttle;
 		return;
 	}
 
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	trace_sched_stat_runtime(curr, delta_exec, 0);
-
-	update_current_exec_runtime(curr, now, delta_exec);
-
 	if (dl_entity_is_special(dl_se))
 		return;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ -1103,23 +1103,17 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Update the current task's runtime statistics.
- */
-static void update_curr(struct cfs_rq *cfs_rq)
+static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 {
-	struct sched_entity *curr = cfs_rq->curr;
-	u64 now = rq_clock_task(rq_of(cfs_rq));
-	u64 delta_exec;
-
-	if (unlikely(!curr))
-		return;
+	u64 now = rq_clock_task(rq);
+	s64 delta_exec;
 
 	delta_exec = now - curr->exec_start;
-	if (unlikely((s64)delta_exec <= 0))
-		return;
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
 
 	curr->exec_start = now;
+	curr->sum_exec_runtime += delta_exec;
 
 	if (schedstat_enabled()) {
 		struct sched_statistics *stats;
@@ -1129,8 +1123,43 @@ static void update_curr(struct cfs_rq *cfs_rq)
 				max(delta_exec, stats->exec_max));
 	}
 
-	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq->exec_clock, delta_exec);
+	return delta_exec;
+}
+
+/*
+ * Used by other classes to account runtime.
+ */
+s64 update_curr_common(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	s64 delta_exec;
+
+	delta_exec = update_curr_se(rq, &curr->se);
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
+
+	trace_sched_stat_runtime(curr, delta_exec, 0);
+
+	account_group_exec_runtime(curr, delta_exec);
+	cgroup_account_cputime(curr, delta_exec);
+
+	return delta_exec;
+}
+
+/*
+ * Update the current task's runtime statistics.
+ */
+static void update_curr(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *curr = cfs_rq->curr;
+	s64 delta_exec;
+
+	if (unlikely(!curr))
+		return;
+
+	delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+	if (unlikely(delta_exec <= 0))
+		return;
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_deadline(cfs_rq, curr);
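A self-contained sketch of the two-layer split this hunk introduces, under assumed stub structures (se_sketch, task_sketch and rq_sketch are illustrative, not the kernel's types): the entity-level helper does the clock and runtime bookkeeping and is called directly by fair's update_curr(), while the common wrapper adds the task-level accounting (tracing, group and cgroup cputime) that the other classes need:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef uint64_t u64;

struct se_sketch   { u64 exec_start, sum_exec_runtime; };
struct task_sketch { struct se_sketch se; u64 group_runtime; };
struct rq_sketch   { struct task_sketch *curr; u64 clock; };

/* Layer 1: entity-level clock and runtime bookkeeping. */
static s64 sketch_update_curr_se(struct rq_sketch *rq, struct se_sketch *curr)
{
	s64 delta_exec = rq->clock - curr->exec_start;

	if (delta_exec <= 0)
		return delta_exec;

	curr->exec_start = rq->clock;
	curr->sum_exec_runtime += delta_exec;
	return delta_exec;
}

/* Layer 2: task-level accounting shared by the non-fair classes;
 * group_runtime stands in for the group/cgroup cputime hooks. */
static s64 sketch_update_curr_common(struct rq_sketch *rq)
{
	struct task_sketch *curr = rq->curr;
	s64 delta_exec = sketch_update_curr_se(rq, &curr->se);

	if (delta_exec <= 0)
		return delta_exec;

	curr->group_runtime += delta_exec;
	return delta_exec;
}

int main(void)
{
	struct task_sketch t = { .se = { .exec_start = 100 } };
	struct rq_sketch rq = { .curr = &t, .clock = 400 };

	printf("delta = %lld ns\n", (long long)sketch_update_curr_common(&rq));
	printf("sum = %llu ns, group = %llu ns\n",
	       (unsigned long long)t.se.sum_exec_runtime,
	       (unsigned long long)t.group_runtime);
	return 0;
}

The design choice visible in the real diff: fair keeps its vruntime and deadline handling after the shared entity-level helper, while every other class consumes the wrapper and keeps only its own bandwidth logic.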
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
@@ -1002,24 +1002,15 @@ static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_rt_entity *rt_se = &curr->rt;
-	u64 delta_exec;
-	u64 now;
+	s64 delta_exec;
 
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0))
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0))
 		return;
 
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	trace_sched_stat_runtime(curr, delta_exec, 0);
-
-	update_current_exec_runtime(curr, now, delta_exec);
-
 	if (!rt_bandwidth_enabled())
 		return;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ -2212,6 +2212,8 @@ struct affinity_context {
 	unsigned int flags;
 };
 
+extern s64 update_curr_common(struct rq *rq);
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -3262,16 +3264,6 @@ extern int sched_dynamic_mode(const char *str);
 extern void sched_dynamic_update(int mode);
 #endif
 
-static inline void update_current_exec_runtime(struct task_struct *curr,
-						u64 now, u64 delta_exec)
-{
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
-}
-
 #ifdef CONFIG_SCHED_MM_CID
 
 #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
@@ -70,18 +70,7 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
-	struct task_struct *curr = rq->curr;
-	u64 now, delta_exec;
-
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec < 0))
-		delta_exec = 0;
-
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	update_current_exec_runtime(curr, now, delta_exec);
+	update_curr_common(rq);
 }
 
 /*
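Finally, a hedged sketch of the consumer pattern that deadline.c, rt.c and stop_task.c now share, with a stubbed runqueue and helper (rq_sketch and stub_update_curr_common are hypothetical names, not kernel code): call the common helper, bail out on a non-positive delta, and keep only class-specific work:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

struct rq_sketch { s64 pending_delta; };

/* Stands in for update_curr_common(): returns the runtime charged
 * since the last accounting pass, or <= 0 if the clock didn't move. */
static s64 stub_update_curr_common(struct rq_sketch *rq)
{
	return rq->pending_delta;
}

static void sketch_update_curr_rt(struct rq_sketch *rq)
{
	s64 delta_exec = stub_update_curr_common(rq);

	if (delta_exec <= 0)
		return;

	/* Only class-specific bookkeeping (e.g. bandwidth) remains here. */
	printf("rt: charged %lld ns\n", (long long)delta_exec);
}

int main(void)
{
	struct rq_sketch rq = { .pending_delta = 42000 };
	sketch_update_curr_rt(&rq);  /* charges 42000 ns */

	rq.pending_delta = 0;
	sketch_update_curr_rt(&rq);  /* clock didn't advance: no-op */
	return 0;
}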