sched/topology: Remove unused 'sd' parameter from arch_scale_cpu_capacity()
The 'struct sched_domain *sd' parameter to arch_scale_cpu_capacity() is
unused since commit:
  765d0af19f ("sched/topology: Remove the ::smt_gain field from 'struct sched_domain'")
Remove it.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: gregkh@linuxfoundation.org
Cc: linux@armlinux.org.uk
Cc: quentin.perret@arm.com
Cc: rafael@kernel.org
Link: https://lkml.kernel.org/r/1560783617-5827-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
			
			
parent d2abae71eb
commit 8ec59c0f5f

13 changed files with 22 additions and 30 deletions
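The conversion for callers is mechanical: the dead 'sd' argument, conventionally passed as NULL, is simply dropped. A minimal caller-side sketch (the helper below is hypothetical, for illustration only; it is not part of this patch):

/* Hypothetical helper illustrating the caller-side conversion. */
static unsigned long cpu_cap(int cpu)
{
	/* Old API: 'sd' was ignored, so callers passed NULL: */
	/*	return arch_scale_cpu_capacity(NULL, cpu);	*/

	/* New API: the unused parameter is gone. */
	return arch_scale_cpu_capacity(cpu);
}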
				
			
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -169,7 +169,7 @@ static void update_cpu_capacity(unsigned int cpu)
 	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, topology_get_cpu_scale(NULL, cpu));
+		cpu, topology_get_cpu_scale(cpu));
 }
 
 #else
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -43,7 +43,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
 }
 
 static void update_topology_flags_workfn(struct work_struct *work);
@@ -116,7 +116,7 @@ void topology_normalize_cpu_scale(void)
 			/ capacity_scale;
 		topology_set_cpu_scale(cpu, capacity);
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			cpu, topology_get_cpu_scale(NULL, cpu));
+			cpu, topology_get_cpu_scale(cpu));
 	}
 }
 
@@ -185,7 +185,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
 
 	for_each_cpu(cpu, policy->related_cpus) {
-		raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
 				    policy->cpuinfo.max_freq / 1000UL;
 		capacity_scale = max(raw_capacity[cpu], capacity_scale);
 	}
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale);
 
 struct sched_domain;
 static inline
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(int cpu)
 {
 	return per_cpu(cpu_scale, cpu);
 }
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -89,7 +89,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
 	 * like schedutil.
 	 */
 	cpu = cpumask_first(to_cpumask(pd->cpus));
-	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+	scale_cpu = arch_scale_cpu_capacity(cpu);
 	cs = &pd->table[pd->nr_cap_states - 1];
 	freq = map_util_freq(max_util, cs->frequency, scale_cpu);
 
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -196,14 +196,6 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
 # define SD_INIT_NAME(type)
 #endif
 
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-#endif
-
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -219,16 +211,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
+#endif	/* !CONFIG_SMP */
+
 #ifndef arch_scale_cpu_capacity
 static __always_inline
-unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
 
-#endif	/* !CONFIG_SMP */
-
 static inline int task_node(const struct task_struct *p)
 {
 	return cpu_to_node(task_cpu(p));
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -223,7 +223,7 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
 		 * All CPUs of a domain must have the same micro-architecture
 		 * since they all share the same table.
 		 */
-		cap = arch_scale_cpu_capacity(NULL, cpu);
+		cap = arch_scale_cpu_capacity(cpu);
 		if (prev_cap && prev_cap != cap) {
 			pr_err("CPUs of %*pbl must have the same capacity\n",
 							cpumask_pr_args(span));
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -276,7 +276,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
 	unsigned long util = cpu_util_cfs(rq);
-	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
 
 	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1195,7 +1195,7 @@ static void update_curr_dl(struct rq *rq)
 						 &curr->dl);
 	} else {
 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
-		unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
 
 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
 		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -764,7 +764,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
-	long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
 	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
 
 	if (cap > 0) {
@@ -7646,7 +7646,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long max = arch_scale_cpu_capacity(sd, cpu);
+	unsigned long max = arch_scale_cpu_capacity(cpu);
 	unsigned long used, free;
 	unsigned long irq;
 
@@ -7671,7 +7671,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = scale_rt_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
-	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
 
 	if (!capacity)
 		capacity = 1;
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -366,7 +366,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 	 * reflect the real amount of computation
 	 */
 	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
-	running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
 
 	/*
 	 * We know the time that has been used by interrupt since last update
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -79,7 +79,7 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
 	 * Scale the elapsed time to reflect the real amount of
 	 * computation
 	 */
-	delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
 
 	rq->clock_pelt += delta;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2248,7 +2248,7 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
 
 static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
 {
-	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+	unsigned long max = arch_scale_cpu_capacity(cpu);
 
 	return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
 }
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1874,10 +1874,10 @@ static struct sched_domain_topology_level
 	unsigned long cap;
 
 	/* Is there any asymmetry? */
-	cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
+	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
 
 	for_each_cpu(i, cpu_map) {
-		if (arch_scale_cpu_capacity(NULL, i) != cap) {
+		if (arch_scale_cpu_capacity(i) != cap) {
 			asym = true;
 			break;
 		}
@@ -1892,7 +1892,7 @@ static struct sched_domain_topology_level
 	 * to everyone.
 	 */
 	for_each_cpu(i, cpu_map) {
-		unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
+		unsigned long max_capacity = arch_scale_cpu_capacity(i);
 		int tl_id = 0;
 
 		for_each_sd_topology(tl) {
@@ -1902,7 +1902,7 @@ static struct sched_domain_topology_level
 			for_each_cpu_and(j, tl->mask(i), cpu_map) {
 				unsigned long capacity;
 
-				capacity = arch_scale_cpu_capacity(NULL, j);
+				capacity = arch_scale_cpu_capacity(j);
 
 				if (capacity <= max_capacity)
 					continue;
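Note on the #ifndef arch_scale_cpu_capacity guard retained above: it is the architecture override hook. The generic definition returns SCHED_CAPACITY_SCALE (1024) for every CPU; an architecture with asymmetric CPU capacities provides its own one-argument definition instead, which after this patch must also use the new signature. A minimal sketch of the override pattern (illustrative, assuming an arch header along these lines; not taken from this patch):

/* arch/<arch>/include/asm/topology.h -- illustrative sketch */
#include <linux/arch_topology.h>

/*
 * Route the scheduler's capacity query to the per-CPU value
 * maintained by the arch_topology driver instead of the
 * SCHED_CAPACITY_SCALE default; must match the new one-argument
 * signature.
 */
#define arch_scale_cpu_capacity	topology_get_cpu_scale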