mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	sched/core: Introduce sched_set_rq_on/offline() helper
Introduce sched_set_rq_on/offline() helpers, so they can be called simply from both the normal and the error path. No functional change. Cc: stable@kernel.org Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com
This commit is contained in:
		
							parent
							
								
									e22f910a26
								
							
						
					
					
						commit
						2f02735412
					
				
					 1 changed files with 26 additions and 14 deletions
				
			
		|  | @ -7845,6 +7845,30 @@ void set_rq_offline(struct rq *rq) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | static inline void sched_set_rq_online(struct rq *rq, int cpu) | ||||||
|  | { | ||||||
|  | 	struct rq_flags rf; | ||||||
|  | 
 | ||||||
|  | 	rq_lock_irqsave(rq, &rf); | ||||||
|  | 	if (rq->rd) { | ||||||
|  | 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | ||||||
|  | 		set_rq_online(rq); | ||||||
|  | 	} | ||||||
|  | 	rq_unlock_irqrestore(rq, &rf); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static inline void sched_set_rq_offline(struct rq *rq, int cpu) | ||||||
|  | { | ||||||
|  | 	struct rq_flags rf; | ||||||
|  | 
 | ||||||
|  | 	rq_lock_irqsave(rq, &rf); | ||||||
|  | 	if (rq->rd) { | ||||||
|  | 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | ||||||
|  | 		set_rq_offline(rq); | ||||||
|  | 	} | ||||||
|  | 	rq_unlock_irqrestore(rq, &rf); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| /*
 | /*
 | ||||||
|  * used to mark begin/end of suspend/resume: |  * used to mark begin/end of suspend/resume: | ||||||
|  */ |  */ | ||||||
|  | @ -7914,7 +7938,6 @@ static inline void sched_smt_present_dec(int cpu) | ||||||
| int sched_cpu_activate(unsigned int cpu) | int sched_cpu_activate(unsigned int cpu) | ||||||
| { | { | ||||||
| 	struct rq *rq = cpu_rq(cpu); | 	struct rq *rq = cpu_rq(cpu); | ||||||
| 	struct rq_flags rf; |  | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Clear the balance_push callback and prepare to schedule | 	 * Clear the balance_push callback and prepare to schedule | ||||||
|  | @ -7943,12 +7966,7 @@ int sched_cpu_activate(unsigned int cpu) | ||||||
| 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the | 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the | ||||||
| 	 *    domains. | 	 *    domains. | ||||||
| 	 */ | 	 */ | ||||||
| 	rq_lock_irqsave(rq, &rf); | 	sched_set_rq_online(rq, cpu); | ||||||
| 	if (rq->rd) { |  | ||||||
| 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |  | ||||||
| 		set_rq_online(rq); |  | ||||||
| 	} |  | ||||||
| 	rq_unlock_irqrestore(rq, &rf); |  | ||||||
| 
 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
|  | @ -7956,7 +7974,6 @@ int sched_cpu_activate(unsigned int cpu) | ||||||
| int sched_cpu_deactivate(unsigned int cpu) | int sched_cpu_deactivate(unsigned int cpu) | ||||||
| { | { | ||||||
| 	struct rq *rq = cpu_rq(cpu); | 	struct rq *rq = cpu_rq(cpu); | ||||||
| 	struct rq_flags rf; |  | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
|  | @ -7987,12 +8004,7 @@ int sched_cpu_deactivate(unsigned int cpu) | ||||||
| 	 */ | 	 */ | ||||||
| 	synchronize_rcu(); | 	synchronize_rcu(); | ||||||
| 
 | 
 | ||||||
| 	rq_lock_irqsave(rq, &rf); | 	sched_set_rq_offline(rq, cpu); | ||||||
| 	if (rq->rd) { |  | ||||||
| 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |  | ||||||
| 		set_rq_offline(rq); |  | ||||||
| 	} |  | ||||||
| 	rq_unlock_irqrestore(rq, &rf); |  | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * When going down, decrement the number of cores with SMT present. | 	 * When going down, decrement the number of cores with SMT present. | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Yang Yingliang
						Yang Yingliang