mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	sched/fair: Remove redundant check in select_idle_smt()
If two CPUs share the LLC cache, then the two cores they belong to are also in the same LLC domain.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-2-wuyun.abel@bytedance.com
This commit is contained in:
		
							parent
							
								
									33f9352579
								
							
						
					
					
						commit
						3e6efe87cd
					
				
					 1 changed file with 4 additions and 7 deletions
				
			
		| 
						 | 
					@ -6350,14 +6350,11 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
 * Scan the local SMT mask for idle CPUs.
 | 
					 * Scan the local SMT mask for idle CPUs.
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 | 
					static int select_idle_smt(struct task_struct *p, int target)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	int cpu;
 | 
						int cpu;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for_each_cpu(cpu, cpu_smt_mask(target)) {
 | 
						for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
 | 
				
			||||||
		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
 | 
					 | 
				
			||||||
		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
 | 
					 | 
				
			||||||
			continue;
 | 
					 | 
				
			||||||
		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 | 
							if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 | 
				
			||||||
			return cpu;
 | 
								return cpu;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
| 
						 | 
					@ -6381,7 +6378,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
 | 
				
			||||||
	return __select_idle_cpu(core, p);
 | 
						return __select_idle_cpu(core, p);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 | 
					static inline int select_idle_smt(struct task_struct *p, int target)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	return -1;
 | 
						return -1;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					@ -6615,7 +6612,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 | 
				
			||||||
		has_idle_core = test_idle_cores(target, false);
 | 
							has_idle_core = test_idle_cores(target, false);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if (!has_idle_core && cpus_share_cache(prev, target)) {
 | 
							if (!has_idle_core && cpus_share_cache(prev, target)) {
 | 
				
			||||||
			i = select_idle_smt(p, sd, prev);
 | 
								i = select_idle_smt(p, prev);
 | 
				
			||||||
			if ((unsigned int)i < nr_cpumask_bits)
 | 
								if ((unsigned int)i < nr_cpumask_bits)
 | 
				
			||||||
				return i;
 | 
									return i;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue