	sched: Allow sched_class::dequeue_task() to fail
Change the function signature of sched_class::dequeue_task() to return a
boolean, allowing future patches to 'fail' dequeue.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105028.864630153@infradead.org
commit 863ccdbb91
parent 3b3dd89b8b
					 7 changed files with 20 additions and 9 deletions
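From a caller's point of view, the change is that dequeue_task() now reports whether the task was in fact dequeued. A minimal caller-side sketch of the intended contract, assuming a hypothetical later patch in which a scheduling class refuses a sleep-time dequeue; every class in this commit still returns true unconditionally, and handle_delayed_dequeue() is an invented placeholder:

	/* Sketch only: no class in this commit can return false yet. */
	if (!dequeue_task(rq, p, DEQUEUE_SLEEP)) {
		/* The class kept p queued; a later patch would handle that here. */
		handle_delayed_dequeue(rq, p);	/* hypothetical helper */
	}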
				
			
kernel/sched/core.c
@@ -2001,7 +2001,10 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 		sched_core_enqueue(rq, p);
 }
 
-void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+/*
+ * Must only return false when DEQUEUE_SLEEP.
+ */
+inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (sched_core_enabled(rq))
 		sched_core_dequeue(rq, p, flags);
@@ -2015,7 +2018,7 @@ void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 	uclamp_rq_dec(rq, p);
-	p->sched_class->dequeue_task(rq, p, flags);
+	return p->sched_class->dequeue_task(rq, p, flags);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/deadline.c
@@ -2162,7 +2162,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_pushable_dl_task(rq, p);
 }
 
-static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 
@@ -2172,6 +2172,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_dl_entity(&p->dl, flags);
 	if (!p->dl.dl_throttled && !dl_server(&p->dl))
 		dequeue_pushable_dl_task(rq, p);
+
+	return true;
 }
 
 /*
kernel/sched/fair.c
@@ -6865,7 +6865,7 @@ static void set_next_buddy(struct sched_entity *se);
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
@@ -6937,6 +6937,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 dequeue_throttle:
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
+
+	return true;
 }
 
 #ifdef CONFIG_SMP
kernel/sched/idle.c
@@ -482,13 +482,14 @@ struct task_struct *pick_next_task_idle(struct rq *rq)
  * It is not legal to sleep in the idle task - print a warning
  * message if some code attempts to do it:
  */
-static void
+static bool
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	raw_spin_rq_unlock_irq(rq);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_rq_lock_irq(rq);
+	return true;
 }
 
 /*
kernel/sched/rt.c
@@ -1483,7 +1483,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_pushable_task(rq, p);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
@@ -1491,6 +1491,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se, flags);
 
 	dequeue_pushable_task(rq, p);
+
+	return true;
 }
 
 /*
kernel/sched/sched.h
@@ -2285,7 +2285,7 @@ struct sched_class {
 #endif
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
+	bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task)   (struct rq *rq);
 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
 
@@ -3606,7 +3606,7 @@ extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *c
 extern void __setscheduler_prio(struct task_struct *p, int prio);
 extern void set_load_weight(struct task_struct *p, bool update_load);
 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
 
 extern void check_class_changed(struct rq *rq, struct task_struct *p,
 				const struct sched_class *prev_class,
kernel/sched/stop_task.c
@@ -57,10 +57,11 @@ enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 	add_nr_running(rq, 1);
 }
 
-static void
+static bool
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	return true;
 }
 
 static void yield_task_stop(struct rq *rq)
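The comment added to dequeue_task() in kernel/sched/core.c pins down the contract: an implementation may only return false when the dequeue was requested with DEQUEUE_SLEEP. A class-side sketch of what a future method could look like under that rule; the delayed-dequeue condition and task_should_stay_queued() are invented for illustration, and no class in this commit takes this path:

static bool dequeue_task_example(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Hypothetical: refuse only sleep-initiated dequeues, per the
	 * "Must only return false when DEQUEUE_SLEEP" rule above.
	 */
	if ((flags & DEQUEUE_SLEEP) && task_should_stay_queued(p))
		return false;	/* p stays on the runqueue */

	/* Class-specific dequeue work would go here. */
	return true;
}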