sched: Allow sched_class::dequeue_task() to fail

Change the function signature of sched_class::dequeue_task() to return a
boolean, allowing future patches to 'fail' dequeue.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105028.864630153@infradead.org
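For illustration, a minimal standalone model of the pattern being changed
(plain C, not kernel code; the identifiers sched_class, dequeue_task, and
DEQUEUE_SLEEP mirror the kernel's, while the *_model names and the failing
class are invented for this sketch): the class hook now reports whether the
task actually left the queue, and the core wrapper simply propagates that
verdict, exactly as the patched core.c does with
return p->sched_class->dequeue_task(rq, p, flags);

/*
 * Standalone model of the sched_class::dequeue_task() change; plain C,
 * not kernel code. Build: cc -Wall model.c && ./a.out
 */
#include <stdbool.h>
#include <stdio.h>

#define DEQUEUE_SLEEP 0x01		/* mirrors the kernel flag name */

struct rq;				/* opaque stand-in for the real type */
struct task_struct;

struct sched_class {
	/* Must only return false when DEQUEUE_SLEEP (the new contract). */
	bool (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
};

struct task_struct {
	const struct sched_class *sched_class;
};

/* Today's classes dequeue unconditionally, like dequeue_task_stop(). */
static bool dequeue_task_stop_model(struct rq *rq, struct task_struct *p, int flags)
{
	(void)rq; (void)p; (void)flags;
	return true;
}

/* A hypothetical future class could refuse a sleep-driven dequeue. */
static bool dequeue_task_delayed_model(struct rq *rq, struct task_struct *p, int flags)
{
	(void)rq; (void)p;
	return !(flags & DEQUEUE_SLEEP);	/* keep the task queued on sleep */
}

/* Core wrapper: propagate the class's verdict, as the patched core.c does. */
static bool dequeue_task_model(struct rq *rq, struct task_struct *p, int flags)
{
	return p->sched_class->dequeue_task(rq, p, flags);
}

int main(void)
{
	static const struct sched_class stop_model = {
		.dequeue_task = dequeue_task_stop_model,
	};
	static const struct sched_class delayed_model = {
		.dequeue_task = dequeue_task_delayed_model,
	};
	struct task_struct a = { .sched_class = &stop_model };
	struct task_struct b = { .sched_class = &delayed_model };

	printf("%d %d\n",
	       dequeue_task_model(NULL, &a, DEQUEUE_SLEEP),	/* 1: dequeued */
	       dequeue_task_model(NULL, &b, DEQUEUE_SLEEP));	/* 0: refused */
	return 0;
}

The stop-like class behaves as every class does after this commit, returning
true unconditionally; the delayed class shows the kind of DEQUEUE_SLEEP
failure the new return type makes possible.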
commit 863ccdbb91
parent 3b3dd89b8b

7 changed files with 20 additions and 9 deletions
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2001,7 +2001,10 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 		sched_core_enqueue(rq, p);
 }
 
-void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+/*
+ * Must only return false when DEQUEUE_SLEEP.
+ */
+inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (sched_core_enabled(rq))
 		sched_core_dequeue(rq, p, flags);
@@ -2015,7 +2018,7 @@ void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 	uclamp_rq_dec(rq, p);
-	p->sched_class->dequeue_task(rq, p, flags);
+	return p->sched_class->dequeue_task(rq, p, flags);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2162,7 +2162,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_pushable_dl_task(rq, p);
 }
 
-static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 
@@ -2172,6 +2172,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_dl_entity(&p->dl, flags);
 	if (!p->dl.dl_throttled && !dl_server(&p->dl))
 		dequeue_pushable_dl_task(rq, p);
+
+	return true;
 }
 
 /*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6865,7 +6865,7 @@ static void set_next_buddy(struct sched_entity *se);
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
@@ -6937,6 +6937,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 dequeue_throttle:
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
+
+	return true;
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -482,13 +482,14 @@ struct task_struct *pick_next_task_idle(struct rq *rq)
  * It is not legal to sleep in the idle task - print a warning
  * message if some code attempts to do it:
  */
-static void
+static bool
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	raw_spin_rq_unlock_irq(rq);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_rq_lock_irq(rq);
+	return true;
 }
 
 /*
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1483,7 +1483,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_pushable_task(rq, p);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
@@ -1491,6 +1491,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se, flags);
 
 	dequeue_pushable_task(rq, p);
+
+	return true;
 }
 
 /*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2285,7 +2285,7 @@ struct sched_class {
 #endif
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
+	bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task)   (struct rq *rq);
 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
 
@@ -3606,7 +3606,7 @@ extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 extern void __setscheduler_prio(struct task_struct *p, int prio);
 extern void set_load_weight(struct task_struct *p, bool update_load);
 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
 
 extern void check_class_changed(struct rq *rq, struct task_struct *p,
 				const struct sched_class *prev_class,
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -57,10 +57,11 @@ enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 	add_nr_running(rq, 1);
 }
 
-static void
+static bool
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	return true;
 }
 
 static void yield_task_stop(struct rq *rq)
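Design note: every per-class implementation in this commit returns true
unconditionally, so the patch is behavior-preserving; it only creates the
hook point. The comment added in core.c pins the contract: a class may
report failure only for a DEQUEUE_SLEEP dequeue, so migration and
priority-change paths can keep assuming success. A later caller might use
the result roughly like this (hypothetical sketch, not in this commit):

	if (!dequeue_task(rq, p, DEQUEUE_SLEEP))
		return;		/* class kept @p queued; defer sleep bookkeeping */
	p->on_rq = 0;		/* task really left the runqueue */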