Merge branch 'tip/sched/core' into for-6.12

- Resolve trivial context conflicts from dl_server clearing being moved
  around.

- Add @next to put_prev_task_scx() and @prev to pick_next_task_scx() to
  match sched/core.

- Merge sched_class->switch_class() addition from sched_ext with
  tip/sched/core changes in __pick_next_task().

- Make pick_next_task_scx() call put_prev_task_scx() to emulate the
  previous behavior where sched_class->put_prev_task() was called before
  sched_class->pick_next_task().

While this makes sched_ext build and function, the behavior is not in
line with other sched classes. The follow-up patches will address the
discrepancies and remove sched_class->switch_class().

Signed-off-by: Tejun Heo <tj@kernel.org>
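To illustrate the protocol the last two bullets refer to: after this merge a class must provide pick_task(), may additionally provide pick_next_task(), and the core emulates the latter from the former. Below is a minimal compilable sketch of that contract, shaped like the reworked __pick_next_task() loop in the kernel/sched/core.c diff; every type and function here is a toy stand-in for illustration, not the kernel's own code.

/* Toy model of the post-merge pick protocol; illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct rq;
struct task_struct;

struct sched_class {
	/*
	 * Optional. When implemented, pick_next_task() must be equivalent
	 * to: p = pick_task(); if (p) put_prev_set_next_task(prev, p);
	 */
	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
	struct task_struct *(*pick_task)(struct rq *rq);
	void (*put_prev_task)(struct rq *rq, struct task_struct *prev,
			      struct task_struct *next);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
};

struct task_struct {
	const char *comm;
	const struct sched_class *sched_class;
};

struct rq {
	struct task_struct *curr;
	struct task_struct *queued;	/* toy: at most one runnable task */
};

static void put_prev_set_next_task(struct rq *rq, struct task_struct *prev,
				   struct task_struct *next)
{
	if (next == prev)
		return;
	prev->sched_class->put_prev_task(rq, prev, next);
	next->sched_class->set_next_task(rq, next, true);
	rq->curr = next;
}

/* Core loop, shaped like the reworked __pick_next_task(). */
static struct task_struct *core_pick(struct rq *rq, struct task_struct *prev,
				     const struct sched_class **classes,
				     int nr_classes)
{
	for (int i = 0; i < nr_classes; i++) {
		const struct sched_class *class = classes[i];
		struct task_struct *p;

		if (class->pick_next_task) {
			/* Class does put_prev/set_next internally. */
			p = class->pick_next_task(rq, prev);
			if (p)
				return p;
		} else {
			/* Core emulates the same sequence around pick_task(). */
			p = class->pick_task(rq);
			if (p) {
				put_prev_set_next_task(rq, prev, p);
				return p;
			}
		}
	}
	return NULL;	/* unreachable if an idle-like class always picks */
}

/* A class implementing only the mandatory pick_task() hook. */
static struct task_struct *toy_pick_task(struct rq *rq) { return rq->queued; }
static void toy_put_prev(struct rq *rq, struct task_struct *prev,
			 struct task_struct *next)
{
	(void)rq;
	printf("put_prev(%s), next known: %s\n", prev->comm,
	       next ? next->comm : "NULL");
}
static void toy_set_next(struct rq *rq, struct task_struct *p, bool first)
{
	(void)rq;
	printf("set_next(%s, first=%d)\n", p->comm, first);
}

static const struct sched_class toy_class = {
	.pick_task	= toy_pick_task,
	.put_prev_task	= toy_put_prev,
	.set_next_task	= toy_set_next,
};

int main(void)
{
	struct task_struct a = { "prev", &toy_class }, b = { "next", &toy_class };
	struct rq rq = { .curr = &a, .queued = &b };
	const struct sched_class *classes[] = { &toy_class };

	core_pick(&rq, &a, classes, 1);	/* prints put_prev then set_next */
	return 0;
}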
commit d7b01aef9d: 9 changed files with 180 additions and 218 deletions
include/linux/sched.h

@@ -694,7 +694,6 @@ struct sched_dl_entity {
 	 */
 	struct rq			*rq;
 	dl_server_has_tasks_f		server_has_tasks;
-	dl_server_pick_f		server_pick_next;
 	dl_server_pick_f		server_pick_task;
 
 #ifdef CONFIG_RT_MUTEXES
kernel/sched/core.c

@@ -3690,8 +3690,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 		rq->idle_stamp = 0;
 	}
 #endif
-
-	p->dl_server = NULL;
 }
 
 /*
@@ -5895,8 +5893,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 	schedstat_inc(this_rq()->sched_count);
 }
 
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
-				  struct rq_flags *rf)
+static void prev_balance(struct rq *rq, struct task_struct *prev,
+			 struct rq_flags *rf)
 {
 	const struct sched_class *start_class = prev->sched_class;
 	const struct sched_class *class;
@@ -5923,16 +5921,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 		if (class->balance && class->balance(rq, prev, rf))
 			break;
 	}
-
-	put_prev_task(rq, prev);
-
-	/*
-	 * We've updated @prev and no longer need the server link, clear it.
-	 * Must be done before ->pick_next_task() because that can (re)set
-	 * ->dl_server.
-	 */
-	if (prev->dl_server)
-		prev->dl_server = NULL;
 }
 
 /*
@@ -5944,6 +5932,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	if (scx_enabled())
 		goto restart;
 
@@ -5962,38 +5952,37 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
-			put_prev_task(rq, prev);
-			p = pick_next_task_idle(rq);
+			p = pick_task_idle(rq);
+			put_prev_set_next_task(rq, prev, p);
 		}
 
-		/*
-		 * This is a normal CFS pick, but the previous could be a DL pick.
-		 * Clear it as previous is no longer picked.
-		 */
-		if (prev->dl_server)
-			prev->dl_server = NULL;
-
-		/*
-		 * This is the fast path; it cannot be a DL server pick;
-		 * therefore even if @p == @prev, ->dl_server must be NULL.
-		 */
-		if (p->dl_server)
-			p->dl_server = NULL;
-
 		return p;
 	}
 
 restart:
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
 
 	for_each_active_class(class) {
-		p = class->pick_next_task(rq);
-		if (p) {
-			const struct sched_class *prev_class = prev->sched_class;
+		if (class->pick_next_task) {
+			p = class->pick_next_task(rq, prev);
+			if (p) {
+				const struct sched_class *prev_class = prev->sched_class;
 
-			if (class != prev_class && prev_class->switch_class)
-				prev_class->switch_class(rq, p);
-			return p;
+				if (class != prev_class && prev_class->switch_class)
+					prev_class->switch_class(rq, p);
+				return p;
+			}
+		} else {
+			p = class->pick_task(rq);
+			if (p) {
+				const struct sched_class *prev_class = prev->sched_class;
+
+				put_prev_set_next_task(rq, prev, p);
+
+				if (class != prev_class && prev_class->switch_class)
+					prev_class->switch_class(rq, p);
+				return p;
+			}
 		}
 	}
 
@@ -6024,6 +6013,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	for_each_active_class(class) {
 		p = class->pick_task(rq);
 		if (p)
@@ -6062,6 +6053,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		 * another cpu during offline.
 		 */
 		rq->core_pick = NULL;
+		rq->core_dl_server = NULL;
 		return __pick_next_task(rq, prev, rf);
 	}
 
@@ -6080,16 +6072,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task(rq, next);
-		}
-
+		rq->dl_server = rq->core_dl_server;
 		rq->core_pick = NULL;
-		goto out;
+		rq->core_dl_server = NULL;
+		goto out_set_next;
 	}
 
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
 
 	smt_mask = cpu_smt_mask(cpu);
 	need_sync = !!rq->core->core_cookie;
@@ -6130,6 +6119,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		next = pick_task(rq);
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
+			rq->core_dl_server = NULL;
 			/*
 			 * For robustness, update the min_vruntime_fi for
 			 * unconstrained picks as well.
@@ -6157,7 +6147,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
-		p = rq_i->core_pick = pick_task(rq_i);
+		rq_i->core_pick = p = pick_task(rq_i);
+		rq_i->core_dl_server = rq_i->dl_server;
+
 		if (!max || prio_less(max, p, fi_before))
 			max = p;
 	}
@@ -6181,6 +6173,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		}
 
 		rq_i->core_pick = p;
+		rq_i->core_dl_server = NULL;
 
 		if (p == rq_i->idle) {
 			if (rq_i->nr_running) {
@@ -6241,6 +6234,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		if (i == cpu) {
 			rq_i->core_pick = NULL;
+			rq_i->core_dl_server = NULL;
 			continue;
 		}
 
@@ -6249,6 +6243,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		if (rq_i->curr == rq_i->core_pick) {
 			rq_i->core_pick = NULL;
+			rq_i->core_dl_server = NULL;
 			continue;
 		}
 
@@ -6256,8 +6251,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	set_next_task(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
 
@@ -8487,6 +8481,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SCHED_CORE
 		rq->core = rq;
 		rq->core_pick = NULL;
+		rq->core_dl_server = NULL;
 		rq->core_enabled = 0;
 		rq->core_tree = RB_ROOT;
 		rq->core_forceidle_count = 0;
kernel/sched/deadline.c

@@ -1665,12 +1665,10 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 
 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
 		    dl_server_has_tasks_f has_tasks,
-		    dl_server_pick_f pick_next,
 		    dl_server_pick_f pick_task)
 {
 	dl_se->rq = rq;
 	dl_se->server_has_tasks = has_tasks;
-	dl_se->server_pick_next = pick_next;
 	dl_se->server_pick_task = pick_task;
 }
 
@@ -1896,46 +1894,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
 }
 
-static inline struct sched_statistics *
+static __always_inline struct sched_statistics *
 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
 {
+	if (!schedstat_enabled())
+		return NULL;
+
+	if (dl_server(dl_se))
+		return NULL;
+
 	return &dl_task_of(dl_se)->stats;
 }
 
 static inline void
 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
-
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+	if (stats)
+		__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
 static inline void
 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
-
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+	if (stats)
+		__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
 static inline void
 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
-
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+	if (stats)
+		__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
 static inline void
@@ -2392,6 +2384,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
 
 	deadline_queue_push_tasks(rq);
+
+	if (hrtick_enabled(rq))
+		start_hrtick_dl(rq, &p->dl);
 }
 
 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
@@ -2407,9 +2402,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 /*
  * __pick_next_task_dl - Helper to pick the next -deadline task to run.
  * @rq: The runqueue to pick the next task from.
- * @peek: If true, just peek at the next task. Only relevant for dlserver.
  */
-static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
+static struct task_struct *__pick_task_dl(struct rq *rq)
 {
 	struct sched_dl_entity *dl_se;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -2423,16 +2417,13 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
 	WARN_ON_ONCE(!dl_se);
 
 	if (dl_server(dl_se)) {
-		if (IS_ENABLED(CONFIG_SMP) && peek)
-			p = dl_se->server_pick_task(dl_se);
-		else
-			p = dl_se->server_pick_next(dl_se);
+		p = dl_se->server_pick_task(dl_se);
 		if (!p) {
 			dl_se->dl_yielded = 1;
 			update_curr_dl_se(rq, dl_se, 0);
 			goto again;
 		}
-		p->dl_server = dl_se;
+		rq->dl_server = dl_se;
 	} else {
 		p = dl_task_of(dl_se);
 	}
@@ -2440,31 +2431,12 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
 	return p;
 }
 
-#ifdef CONFIG_SMP
 static struct task_struct *pick_task_dl(struct rq *rq)
 {
-	return __pick_next_task_dl(rq, true);
-}
-#endif
-
-static struct task_struct *pick_next_task_dl(struct rq *rq)
-{
-	struct task_struct *p;
-
-	p = __pick_next_task_dl(rq, false);
-	if (!p)
-		return p;
-
-	if (!p->dl_server)
-		set_next_task_dl(rq, p, true);
-
-	if (hrtick_enabled(rq))
-		start_hrtick_dl(rq, &p->dl);
-
-	return p;
+	return __pick_task_dl(rq);
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -3156,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = {
 
 	.wakeup_preempt		= wakeup_preempt_dl,
 
-	.pick_next_task		= pick_next_task_dl,
+	.pick_task		= pick_task_dl,
 	.put_prev_task		= put_prev_task_dl,
 	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_dl,
-	.pick_task		= pick_task_dl,
 	.select_task_rq		= select_task_rq_dl,
 	.migrate_task_rq	= migrate_task_rq_dl,
 	.set_cpus_allowed       = set_cpus_allowed_dl,
kernel/sched/ext.c

@@ -2719,7 +2719,8 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 	}
 }
 
-static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
+static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+			      struct task_struct *next)
 {
 	update_curr_scx(rq);
 
@@ -2774,14 +2775,21 @@ static struct task_struct *first_local_task(struct rq *rq)
 					struct task_struct, scx.dsq_list.node);
 }
 
-static struct task_struct *pick_next_task_scx(struct rq *rq)
+static struct task_struct *pick_next_task_scx(struct rq *rq,
+					      struct task_struct *prev)
 {
 	struct task_struct *p;
 
+	if (prev->sched_class == &ext_sched_class)
+		put_prev_task_scx(rq, prev, NULL);
+
 	p = first_local_task(rq);
 	if (!p)
 		return NULL;
 
+	if (prev->sched_class != &ext_sched_class)
+		prev->sched_class->put_prev_task(rq, prev, p);
+
 	set_next_task_scx(rq, p, true);
 
 	if (unlikely(!p->scx.slice)) {
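The scx shim above keeps the old put-before-pick ordering inside the new interface, which is exactly the discrepancy the commit message says the follow-ups will remove. A toy sketch of the two orderings (stand-in functions for illustration, not kernel code):

/* Illustration only: the two hand-off orderings discussed above. */
#include <stdio.h>

static void put_prev_task(const char *prev, const char *next)
{
	printf("put_prev_task(prev=%s, next=%s)\n", prev, next ? next : "NULL");
}

static const char *pick_task(void) { return "next"; }

static void set_next_task(const char *p) { printf("set_next_task(%s)\n", p); }

/* Old model, which put_prev_task_scx(rq, prev, NULL) above emulates:
 * prev is put away before picking, so @next cannot be passed yet. */
static void put_before_pick(void)
{
	put_prev_task("prev", NULL);
	set_next_task(pick_task());
}

/* New model shared by the other classes after this merge: pick first,
 * then put_prev + set_next with @next already known. */
static void pick_then_put(void)
{
	const char *next = pick_task();

	put_prev_task("prev", next);
	set_next_task(next);
}

int main(void)
{
	put_before_pick();
	pick_then_put();
	return 0;
}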
kernel/sched/fair.c

@@ -5457,6 +5457,13 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	se->sched_delayed = 0;
+	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+		se->vlag = 0;
+}
+
 static bool
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -5532,11 +5539,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 
-	if (flags & DEQUEUE_DELAYED) {
-		se->sched_delayed = 0;
-		if (sched_feat(DELAY_ZERO) && se->vlag > 0)
-			se->vlag = 0;
-	}
+	if (flags & DEQUEUE_DELAYED)
+		finish_delayed_dequeue_entity(se);
 
 	if (cfs_rq->nr_running == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
@@ -8746,17 +8750,12 @@ static struct task_struct *pick_task_fair(struct rq *rq)
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 
-	/*
-	 * This can be called from directly from CFS's ->pick_task() or indirectly
-	 * from DL's ->pick_task when fair server is enabled. In the indirect case,
-	 * DL will set ->dl_server just after this function is called, so its Ok to
-	 * clear. In the direct case, we are picking directly so we must clear it.
-	 */
-	task_of(se)->dl_server = NULL;
-
 	return task_of(se);
 }
 
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+
 struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -8771,9 +8770,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	se = &p->se;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (!prev || prev->sched_class != &fair_sched_class)
+	if (prev->sched_class != &fair_sched_class)
 		goto simple;
 
+	__put_prev_set_next_dl_server(rq, prev, p);
+
 	/*
 	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
 	 * likely that a next task is from the same cgroup as the current.
@@ -8805,33 +8806,15 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		put_prev_entity(cfs_rq, pse);
 		set_next_entity(cfs_rq, se);
+
+		__set_next_task_fair(rq, p, true);
 	}
 
-	goto done;
+	return p;
 
 simple:
 #endif
-	if (prev)
-		put_prev_task(rq, prev);
-
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
-
-done: __maybe_unused;
-#ifdef CONFIG_SMP
-	/*
-	 * Move the next running task to the front of
-	 * the list, so our cfs_tasks list becomes MRU
-	 * one.
-	 */
-	list_move(&p->se.group_node, &rq->cfs_tasks);
-#endif
-
-	if (hrtick_enabled_fair(rq))
-		hrtick_start_fair(rq, p);
-
-	update_misfit_status(p, rq);
-	sched_fair_update_stop_tick(rq, p);
-
+	put_prev_set_next_task(rq, prev, p);
 	return p;
 
 idle:
@@ -8860,9 +8843,9 @@ done: __maybe_unused;
 	return NULL;
 }
 
-static struct task_struct *__pick_next_task_fair(struct rq *rq)
+static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 {
-	return pick_next_task_fair(rq, NULL, NULL);
+	return pick_next_task_fair(rq, prev, NULL);
}
 
 static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
@@ -8872,16 +8855,7 @@ static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
 
 static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
 {
-#ifdef CONFIG_SMP
 	return pick_task_fair(dl_se->rq);
-#else
-	return NULL;
-#endif
-}
-
-static struct task_struct *fair_server_pick_next(struct sched_dl_entity *dl_se)
-{
-	return pick_next_task_fair(dl_se->rq, NULL, NULL);
 }
 
 void fair_server_init(struct rq *rq)
@@ -8890,15 +8864,13 @@ void fair_server_init(struct rq *rq)
 
 	init_dl_entity(dl_se);
 
-	dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_next,
-		       fair_server_pick_task);
-
+	dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task);
 }
 
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
@@ -13098,11 +13070,16 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	 * and we cannot use DEQUEUE_DELAYED.
 	 */
 	if (p->se.sched_delayed) {
+		/* First, dequeue it from its new class' structures */
 		dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP);
-		p->se.sched_delayed = 0;
+
+		/*
+		 * Now, clean up the fair_sched_class side of things
+		 * related to sched_delayed being true and that wasn't done
+		 * due to the generic dequeue not using DEQUEUE_DELAYED.
+		 */
+		finish_delayed_dequeue_entity(&p->se);
 		p->se.rel_deadline = 0;
-		if (sched_feat(DELAY_ZERO) && p->se.vlag > 0)
-			p->se.vlag = 0;
 		__block_task(rq, p);
 	}
 }
@@ -13127,12 +13104,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	}
 }
 
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 {
 	struct sched_entity *se = &p->se;
 
@@ -13145,6 +13117,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 		list_move(&se->group_node, &rq->cfs_tasks);
 	}
 #endif
+	if (!first)
+		return;
+
+	SCHED_WARN_ON(se->sched_delayed);
+
+	if (hrtick_enabled_fair(rq))
+		hrtick_start_fair(rq, p);
+
+	update_misfit_status(p, rq);
+	sched_fair_update_stop_tick(rq, p);
+}
+
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+{
+	struct sched_entity *se = &p->se;
 
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -13154,10 +13147,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
 
-	if (!first)
-		return;
-
-	SCHED_WARN_ON(se->sched_delayed);
+	__set_next_task_fair(rq, p, first);
 }
 
 void init_cfs_rq(struct cfs_rq *cfs_rq)
@@ -13483,13 +13473,13 @@ DEFINE_SCHED_CLASS(fair) = {
 
 	.wakeup_preempt		= check_preempt_wakeup_fair,
 
+	.pick_task		= pick_task_fair,
 	.pick_next_task		= __pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 	.set_next_task          = set_next_task_fair,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_fair,
-	.pick_task		= pick_task_fair,
 	.select_task_rq		= select_task_rq_fair,
 	.migrate_task_rq	= migrate_task_rq_fair,
 
kernel/sched/idle.c

@@ -450,7 +450,7 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	dl_server_update_idle_time(rq, prev);
 	scx_update_idle(rq, false);
@@ -464,21 +464,10 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
 	next->se.exec_start = rq_clock_task(rq);
 }
 
-#ifdef CONFIG_SMP
-static struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq)
 {
 	return rq->idle;
 }
-#endif
-
-struct task_struct *pick_next_task_idle(struct rq *rq)
-{
-	struct task_struct *next = rq->idle;
-
-	set_next_task_idle(rq, next, true);
-
-	return next;
-}
 
 /*
  * It is not legal to sleep in the idle task - print a warning
@@ -533,13 +522,12 @@ DEFINE_SCHED_CLASS(idle) = {
 
 	.wakeup_preempt		= wakeup_preempt_idle,
 
-	.pick_next_task		= pick_next_task_idle,
+	.pick_task		= pick_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 	.set_next_task          = set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_idle,
-	.pick_task		= pick_task_idle,
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
kernel/sched/rt.c

@@ -1748,17 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq)
 	return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
-{
-	struct task_struct *p = pick_task_rt(rq);
-
-	if (p)
-		set_next_task_rt(rq, p, true);
-
-	return p;
-}
-
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -2645,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = {
 
 	.wakeup_preempt		= wakeup_preempt_rt,
 
-	.pick_next_task		= pick_next_task_rt,
+	.pick_task		= pick_task_rt,
 	.put_prev_task		= put_prev_task_rt,
 	.set_next_task          = set_next_task_rt,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_rt,
-	.pick_task		= pick_task_rt,
 	.select_task_rq		= select_task_rq_rt,
 	.set_cpus_allowed       = set_cpus_allowed_common,
 	.rq_online              = rq_online_rt,
kernel/sched/sched.h

@@ -389,7 +389,6 @@ extern void dl_server_start(struct sched_dl_entity *dl_se);
 extern void dl_server_stop(struct sched_dl_entity *dl_se);
 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
 		    dl_server_has_tasks_f has_tasks,
-		    dl_server_pick_f pick_next,
 		    dl_server_pick_f pick_task);
 
 extern void dl_server_update_idle_time(struct rq *rq,
@@ -1133,6 +1132,7 @@ struct rq {
 	unsigned int		nr_uninterruptible;
 
 	struct task_struct __rcu	*curr;
+	struct sched_dl_entity	*dl_server;
 	struct task_struct	*idle;
 	struct task_struct	*stop;
 	unsigned long		next_balance;
@@ -1260,6 +1260,7 @@ struct rq {
 	/* per rq */
 	struct rq		*core;
 	struct task_struct	*core_pick;
+	struct sched_dl_entity	*core_dl_server;
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
@@ -2368,9 +2369,19 @@ struct sched_class {
 	void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
 
 	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-	struct task_struct *(*pick_next_task)(struct rq *rq);
+	struct task_struct *(*pick_task)(struct rq *rq);
+	/*
+	 * Optional! When implemented pick_next_task() should be equivalent to:
+	 *
+	 *   next = pick_task();
+	 *   if (next) {
+	 *       put_prev_task(prev);
+	 *       set_next_task_first(next);
+	 *   }
+	 */
+	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
 
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
 
 	void (*switch_class)(struct rq *rq, struct task_struct *next);
@@ -2378,8 +2389,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
 
-	struct task_struct * (*pick_task)(struct rq *rq);
-
 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -2426,7 +2435,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->curr != prev);
-	prev->sched_class->put_prev_task(rq, prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -2434,6 +2443,30 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
+static inline void
+__put_prev_set_next_dl_server(struct rq *rq,
+			      struct task_struct *prev,
+			      struct task_struct *next)
+{
+	prev->dl_server = NULL;
+	next->dl_server = rq->dl_server;
+	rq->dl_server = NULL;
+}
+
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
+{
+	WARN_ON_ONCE(rq->curr != prev);
+
+	__put_prev_set_next_dl_server(rq, prev, next);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev, next);
+	next->sched_class->set_next_task(rq, next, true);
+}
+
 /*
  * Helper to define a sched_class instance; each one is placed in a separate
@@ -2524,7 +2557,7 @@ static inline bool sched_fair_runnable(struct rq *rq)
 }
 
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_next_task_idle(struct rq *rq);
+extern struct task_struct *pick_task_idle(struct rq *rq);
 
 #define SCA_CHECK		0x01
 #define SCA_MIGRATE_DISABLE	0x02
kernel/sched/stop_task.c

@@ -41,16 +41,6 @@ static struct task_struct *pick_task_stop(struct rq *rq)
 	return rq->stop;
 }
 
-static struct task_struct *pick_next_task_stop(struct rq *rq)
-{
-	struct task_struct *p = pick_task_stop(rq);
-
-	if (p)
-		set_next_task_stop(rq, p, true);
-
-	return p;
-}
-
 static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -69,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
 	BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	update_curr_common(rq);
 }
@@ -112,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = {
 
 	.wakeup_preempt		= wakeup_preempt_stop,
 
-	.pick_next_task		= pick_next_task_stop,
+	.pick_task		= pick_task_stop,
 	.put_prev_task		= put_prev_task_stop,
 	.set_next_task          = set_next_task_stop,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_stop,
-	.pick_task		= pick_task_stop,
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
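Taken together, the rq->dl_server field added in kernel/sched/sched.h turns the old per-task server link into a pick-to-commit carrier: __pick_task_dl() records the server on the runqueue, and put_prev_set_next_task() moves it onto the chosen task while clearing it on the outgoing one. A self-contained sketch of just that data flow, with toy structures rather than the kernel's:

/* dl_server hand-off sketch; toy types, illustration only. */
#include <assert.h>
#include <stddef.h>

struct sched_dl_entity { int id; };

struct task_struct {
	struct sched_dl_entity *dl_server;	/* set while running on a server's behalf */
};

struct rq {
	struct sched_dl_entity *dl_server;	/* set at pick time, consumed at commit time */
};

/* Mirrors __put_prev_set_next_dl_server() from the sched.h hunk above. */
static void __put_prev_set_next_dl_server(struct rq *rq,
					  struct task_struct *prev,
					  struct task_struct *next)
{
	prev->dl_server = NULL;			/* prev is no longer a server pick */
	next->dl_server = rq->dl_server;	/* hand the pending server link to next */
	rq->dl_server = NULL;			/* carrier is cleared once committed */
}

int main(void)
{
	struct sched_dl_entity fair_server = { 1 };
	struct task_struct prev = { &fair_server };	/* prev ran for the server */
	struct task_struct next = { NULL };
	struct rq rq = { &fair_server };	/* as __pick_task_dl() would record */

	__put_prev_set_next_dl_server(&rq, &prev, &next);

	assert(prev.dl_server == NULL);
	assert(next.dl_server == &fair_server);
	assert(rq.dl_server == NULL);
	return 0;
}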