	sched/fair, cpumask: Export for_each_cpu_wrap()
More users for for_each_cpu_wrap() have appeared. Promote the construct
to the generic cpumask interface.

The implementation is slightly modified to reduce arguments.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Lauro Ramos Venancio <lvenanci@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwang@redhat.com
Link: http://lkml.kernel.org/r/20170414122005.o35me2h5nowqkxbv@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
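For orientation before the diff: the promoted construct iterates over every set bit in a mask exactly once, starting at an arbitrary CPU and wrapping around to bit 0. A minimal sketch of a caller, with a hypothetical function name (log_online_cpus_from() is invented for illustration; cpu_online_mask and pr_info() are the usual kernel facilities):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical example: log every online CPU, starting the walk at @start. */
static void log_online_cpus_from(int start)
{
	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, start)
		pr_info("cpu %d is online\n", cpu);

	/* On loop exit, cpu >= nr_cpu_ids, as the new kernel-doc promises. */
}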
parent 8c0334697d
commit c743f0a5c5

3 changed files with 53 additions and 41 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)					\
+	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
+	     (cpu) < nr_cpumask_bits;						\
+	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
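To make the wrap semantics concrete, a small demo (the mask contents are invented for illustration and assume at least 6 possible CPUs; wrap_order_demo() is not part of the commit):

#include <linux/cpumask.h>
#include <linux/printk.h>

static void wrap_order_demo(void)
{
	static struct cpumask mask;	/* static to avoid a large stack frame */
	int cpu;

	cpumask_clear(&mask);
	cpumask_set_cpu(1, &mask);
	cpumask_set_cpu(3, &mask);
	cpumask_set_cpu(5, &mask);

	/* Starting at 4, this visits 5, then wraps to visit 1 and 3. */
	for_each_cpu_wrap(cpu, &mask, 4)
		pr_info("visited cpu %d\n", cpu);
}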
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5640,43 +5640,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-		(cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
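The hunk above deletes the scheduler-private variant. Its extra @wrap argument existed only to thread scratch state through the macro, so every caller had to declare a throwaway variable; the promoted version keeps that state inside cpumask_next_wrap() instead. An illustrative before/after (the two forms cannot coexist in one tree, and do_something() is a hypothetical placeholder):

	/* old, 4-argument form: caller supplies scratch state */
	int cpu, wrap;
	for_each_cpu_wrap(cpu, mask, start, wrap)
		do_something(cpu);

	/* new, 3-argument form: wrap handling is internal */
	int cpu;
	for_each_cpu_wrap(cpu, mask, start)
		do_something(cpu);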
@@ -5736,7 +5699,7 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
+	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
 		return -1;
@@ -5746,7 +5709,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
 	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5812,7 +5775,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
@@ -5829,7 +5792,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
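These call-site hunks are mechanical, but the start argument is doing real work: the comment removed above kept the wrapped scan "such that not all CPUs looking for an idle CPU find the same CPU". A sketch of the effect (the scenario and pick_idle() are invented for illustration; idle_cpu() is the real predicate used in select_idle_cpu()):

static int pick_idle(const struct cpumask *llc_span, int target)
{
	int cpu;

	/* Two concurrent wakers with target = 2 and target = 6 begin their
	 * scans at different offsets, so they tend to settle on different
	 * idle CPUs instead of racing for the first idle CPU in the mask. */
	for_each_cpu_wrap(cpu, llc_span, target) {
		if (idle_cpu(cpu))
			return cpu;
	}

	return target;	/* nothing idle: fall back to the original target */
}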
diff --git a/lib/cpumask.c b/lib/cpumask.c
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
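As a sanity check on the helper's edge cases, a hand trace of the calls the macro makes, assuming nr_cpumask_bits == 8 and mask bits {1, 3, 5} set (values invented purely for illustration):

	cpumask_next_wrap(3, mask, 4, false)  -> 5	/* macro start: n = start - 1 */
	cpumask_next_wrap(5, mask, 4, true)   -> 1	/* ran off the end; wrap and rescan from -1 */
	cpumask_next_wrap(1, mask, 4, true)   -> 3	/* still below @start, keep going */
	cpumask_next_wrap(3, mask, 4, true)   -> 8	/* next (5) crossed @start: return nr_cpumask_bits */

The last return is >= nr_cpumask_bits, which is what terminates for_each_cpu_wrap() without revisiting bit 5.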