mirror of https://github.com/torvalds/linux.git

sched: Combine the last put_prev_task() and the first set_next_task()

Ensure the last put_prev_task() and the first set_next_task() always go together.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org
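
At each call site the change is mechanical: the back-to-back put/set pair becomes one helper call. A minimal before/after sketch of the pattern as it appears in the core.c hunks below (pick_next_task_fair() gets the same treatment for its set_next_task_fair() call):

	/* before: two back-to-back calls at each pick site */
	put_prev_task(rq, prev);
	set_next_task_first(rq, p);

	/* after: one combined helper, which is also a no-op when p == prev */
	put_prev_set_next_task(rq, prev, p);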

parent fd03c5b858
commit 436f3eed5c

3 changed files with 14 additions and 16 deletions

kernel/sched/core.c

@@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
 			p = pick_task_idle(rq);
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, p);
+			put_prev_set_next_task(rq, prev, p);
 		}
 
 		/*
@@ -5926,8 +5925,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		} else {
 			p = class->pick_task(rq);
 			if (p) {
-				put_prev_task(rq, prev);
-				set_next_task_first(rq, p);
+				put_prev_set_next_task(rq, prev, p);
 				return p;
 			}
 		}
@@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, next);
-		}
-
 		rq->core_pick = NULL;
-		goto out;
+		goto out_set_next;
 	}
 
 	prev_balance(rq, prev, rf);
@@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	put_prev_task(rq, prev);
-	set_next_task_first(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
 
kernel/sched/fair.c

@@ -8819,8 +8819,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 simple:
 #endif
-	put_prev_task(rq, prev);
-	set_next_task_fair(rq, p, true);
+	put_prev_set_next_task(rq, prev, p);
 	return p;
 
 idle:
kernel/sched/sched.h

@@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
-static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
 {
+	WARN_ON_ONCE(rq->curr != prev);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev);
 	next->sched_class->set_next_task(rq, next, true);
 }
 
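
For illustration only, a small self-contained C model of the helper's behaviour. The struct, callback and task definitions below are simplified stand-ins invented for this sketch, not the kernel's real types, and assert() stands in for WARN_ON_ONCE(); only the body of put_prev_set_next_task() mirrors the hunk above.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct rq;
struct task_struct;

struct sched_class {
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
};

struct task_struct {
	const char *comm;
	const struct sched_class *sched_class;
};

struct rq {
	struct task_struct *curr;
};

static void put_prev(struct rq *rq, struct task_struct *p)
{
	printf("put_prev_task(%s)\n", p->comm);
}

static void set_next(struct rq *rq, struct task_struct *p, bool first)
{
	printf("set_next_task(%s, first=%d)\n", p->comm, first);
	rq->curr = p;
}

static const struct sched_class stub_class = {
	.put_prev_task	= put_prev,
	.set_next_task	= set_next,
};

/* Same shape as the helper above: a no-op when next == prev, otherwise
 * the put_prev_task()/set_next_task() pair runs as one unit. */
static void put_prev_set_next_task(struct rq *rq, struct task_struct *prev,
				   struct task_struct *next)
{
	assert(rq->curr == prev);

	if (next == prev)
		return;

	prev->sched_class->put_prev_task(rq, prev);
	next->sched_class->set_next_task(rq, next, true);
}

int main(void)
{
	struct task_struct a = { .comm = "prev", .sched_class = &stub_class };
	struct task_struct b = { .comm = "next", .sched_class = &stub_class };
	struct rq rq = { .curr = &a };

	put_prev_set_next_task(&rq, &a, &a);	/* same task: prints nothing */
	put_prev_set_next_task(&rq, &a, &b);	/* switch: put, then set */
	return 0;
}

Compiled as an ordinary userspace program this prints the put/set pair only for the second call, which is the pairing invariant the new helper enforces.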