sched: Add task_struct pointer to sched_class::set_curr_task
In preparation for further separating pick_next_task() and set_curr_task(), we have to pass the actual task into the latter; while there, rename it to better pair with put_prev_task().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
parent 10e7071b2f
commit 03b7fad167

7 changed files with 49 additions and 47 deletions
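
For context, the core.c hunks below all follow the same change-of-class/priority pattern around the renamed hook: dequeue and "put" the task if needed, apply the change, then requeue it and call set_next_task() with the task pointer where set_curr_task() used to be called. A minimal sketch of that pattern follows; the locking/dequeue half (task_rq_lock(), task_current(), dequeue_task() and the DEQUEUE_* flags) is not part of the hunks in this commit and is recalled from kernel/sched/core.c, so treat the fragment as illustrative rather than as the exact upstream code.

	/* p is the task whose class / priority / group is being changed */
	struct rq_flags rf;
	struct rq *rq;
	int queued, running;

	rq = task_rq_lock(p, &rf);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	/* ... apply the class / priority / group change to p ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);	/* previously: set_curr_task(rq, p) */

	task_rq_unlock(rq, p, &rf);

The sched.h hunk adds the matching set_next_task() wrapper, which now also sanity-checks that the task being set is rq->curr.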
				
			
kernel/sched/core.c

@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 }
 
 /*
@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
 			resched_curr(rq);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 out_unlock:
 	task_rq_unlock(rq, p, &rf);
 }
@@ -4840,7 +4840,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		enqueue_task(rq, p, queue_flags);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 
@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
 	if (running)
-		set_curr_task(rq, tsk);
+		set_next_task(rq, tsk);
 
 	task_rq_unlock(rq, tsk, &rf);
 }

kernel/sched/deadline.c

@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
 	 */
 }
 
-static void set_curr_task_dl(struct rq *rq)
-{
-	set_next_task_dl(rq, rq->curr);
-}
-
 #ifdef CONFIG_SMP
 
 /* Only try algorithms three times */
@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {
 
 	.pick_next_task		= pick_next_task_dl,
 	.put_prev_task		= put_prev_task_dl,
+	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_dl,
@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
 	.task_woken		= task_woken_dl,
 #endif
 
-	.set_curr_task		= set_curr_task_dl,
 	.task_tick		= task_tick_dl,
 	.task_fork              = task_fork_dl,
 

kernel/sched/fair.c

@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
  * This routine is mostly called to set cfs_rq->curr field when a task
  * migrates between groups/classes.
  */
-static void set_curr_task_fair(struct rq *rq)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p)
 {
-	struct sched_entity *se = &rq->curr->se;
+	struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_SMP
+	if (task_on_rq_queued(p)) {
+		/*
+		 * Move the next running task to the front of the list, so our
+		 * cfs_tasks list becomes MRU one.
+		 */
+		list_move(&se->group_node, &rq->cfs_tasks);
+	}
+#endif
 
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
 	.check_preempt_curr	= check_preempt_wakeup,
 
 	.pick_next_task		= pick_next_task_fair,
+
 	.put_prev_task		= put_prev_task_fair,
+	.set_next_task          = set_next_task_fair,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_fork		= task_fork_fair,
 

kernel/sched/idle.c

@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_curr(rq);
 }
 
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+{
+	update_idle_core(rq);
+	schedstat_inc(rq->sched_goidle);
+}
+
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	put_prev_task(rq, prev);
-	update_idle_core(rq);
-	schedstat_inc(rq->sched_goidle);
+	struct task_struct *next = rq->idle;
 
-	return rq->idle;
+	put_prev_task(rq, prev);
+	set_next_task_idle(rq, next);
+
+	return next;
 }
 
 /*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 	raw_spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
 	BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {
 
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
+	.set_next_task          = set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 
 	.get_rr_interval	= get_rr_interval_idle,

kernel/sched/rt.c

@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	}
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-	set_next_task_rt(rq, rq->curr);
-}
-
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {
 
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
+	.set_next_task          = set_next_task_rt,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
 	.switched_from		= switched_from_rt,
 #endif
 
-	.set_curr_task          = set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 
 	.get_rr_interval	= get_rr_interval_rt,

kernel/sched/sched.h

@@ -1707,6 +1707,7 @@ struct sched_class {
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1721,7 +1722,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/stop_task.c

@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 	/* we're never preempted */
 }
 
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+{
+	stop->se.exec_start = rq_clock_task(rq);
+}
+
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 		return NULL;
 
 	put_prev_task(rq, prev);
-
-	stop->se.exec_start = rq_clock_task(rq);
+	set_next_task_stop(rq, stop);
 
 	return stop;
 }
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_stop(struct rq *rq)
-{
-	struct task_struct *stop = rq->stop;
-
-	stop->se.exec_start = rq_clock_task(rq);
-}
-
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
 	BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {
 
 	.pick_next_task		= pick_next_task_stop,
 	.put_prev_task		= put_prev_task_stop,
+	.set_next_task          = set_next_task_stop,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_stop,
 	.task_tick		= task_tick_stop,
 
 	.get_rr_interval	= get_rr_interval_stop,