sched/smt: Introduce sched_smt_present_inc/dec() helper

Introduce the sched_smt_present_inc()/dec() helpers, so the same logic can
be called simply from both the normal and the error paths. No functional
change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud.com
parent 77baa5bafc
commit 31b164e2e4

1 changed file with 19 additions and 7 deletions
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7895,6 +7895,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7906,13 +7922,10 @@ int sched_cpu_activate(unsigned int cpu)
 	 */
 	balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7981,13 +7994,12 @@ int sched_cpu_deactivate(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
+	sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
 #endif
 
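Why the refactor matters: an error path can now rebalance the sched_smt_present accounting with a single call instead of repeating the open-coded #ifdef CONFIG_SCHED_SMT / cpumask_weight() block. Below is a minimal userspace sketch of that pattern; the counter, the mask-weight stub, and the failing deactivate path are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Stand-in for the sched_smt_present static key: a plain counter. */
static int smt_present_count;

/* Stand-in for cpumask_weight(cpu_smt_mask(cpu)): pretend every core
 * has exactly two hardware threads. */
static int smt_mask_weight(int cpu)
{
	(void)cpu;
	return 2;
}

/* Helpers mirroring the shape of sched_smt_present_inc/dec(). */
static void smt_present_inc(int cpu)
{
	if (smt_mask_weight(cpu) == 2)
		smt_present_count++;
}

static void smt_present_dec(int cpu)
{
	if (smt_mask_weight(cpu) == 2)
		smt_present_count--;
}

/* Hypothetical deactivate path: on failure, one call undoes the dec. */
static int cpu_deactivate(int cpu, int fail)
{
	smt_present_dec(cpu);
	if (fail) {
		smt_present_inc(cpu);	/* error path: rebalance the count */
		return -1;
	}
	return 0;
}

int main(void)
{
	smt_present_inc(0);	/* CPU 0 comes up with an SMT sibling */
	cpu_deactivate(0, 1);	/* deactivation fails midway */

	/* Prints 1: the accounting stayed balanced across the failure. */
	printf("count after failed deactivate: %d\n", smt_present_count);
	return 0;
}

With the pre-patch open-coded form, every such error path would have had to repeat the #ifdef block and the sibling-count check; factoring them into sched_smt_present_inc()/dec() is what keeps an unwind path to a single line.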