Mirror of https://github.com/torvalds/linux.git
	Merge branch 'pm-cpufreq-sched'
* pm-cpufreq-sched:
  cpufreq: schedutil: Always process remote callback with slow switching
  cpufreq: schedutil: Don't restrict kthread to related_cpus unnecessarily
  cpufreq: Return 0 from ->fast_switch() on errors
  cpufreq: Simplify cpufreq_can_do_remote_dvfs()
  cpufreq: Process remote callbacks from any CPU if the platform permits
  sched: cpufreq: Allow remote cpufreq callbacks
  cpufreq: schedutil: Use unsigned int for iowait boost
  cpufreq: schedutil: Make iowait boost more energy efficient
commit 08a10002be
10 changed files with 116 additions and 31 deletions
drivers/cpufreq/cpufreq-dt.c
@@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 		transition_latency = CPUFREQ_ETERNAL;
 
 	policy->cpuinfo.transition_latency = transition_latency;
+	policy->dvfs_possible_from_any_cpu = true;
 
 	return 0;
drivers/cpufreq/cpufreq.c
@@ -1843,9 +1843,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  * twice in parallel for the same policy and that it will never be called in
  * parallel with either ->target() or ->target_index() for the same policy.
  *
- * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
- * callback to indicate an error condition, the hardware configuration must be
- * preserved.
+ * Returns the actual frequency set for the CPU.
+ *
+ * If 0 is returned by the driver's ->fast_switch() callback to indicate an
+ * error condition, the hardware configuration must be preserved.
  */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
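The comment rewrite above tracks the new error convention from "cpufreq: Return 0 from ->fast_switch() on errors": drivers now signal a failed fast switch by returning 0 instead of CPUFREQ_ENTRY_INVALID. A minimal driver-side sketch of that contract (hypothetical driver; example_hw_write_freq() is an assumed helper, not a real kernel API):

	static unsigned int example_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
	{
		/* Assumed hardware accessor; returns non-zero on failure. */
		if (example_hw_write_freq(policy->cpu, target_freq))
			return 0;	/* error: hardware state must be preserved */

		return target_freq;	/* the frequency actually set */
	}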
drivers/cpufreq/cpufreq_governor.c
@@ -272,6 +272,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
 	u64 delta_ns, lst;
 
+	if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
+		return;
+
 	/*
 	 * The work may not be allowed to be queued up right now.
 	 * Possible reasons:
drivers/cpufreq/intel_pstate.c
@@ -1746,6 +1746,10 @@ static void intel_pstate_update_util_pid(struct update_util_data *data,
 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
 	u64 delta_ns = time - cpu->sample.time;
 
+	/* Don't allow remote callbacks */
+	if (smp_processor_id() != cpu->cpu)
+		return;
+
 	if ((s64)delta_ns < pid_params.sample_rate_ns)
 		return;
@@ -1763,6 +1767,10 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
 	u64 delta_ns;
 
+	/* Don't allow remote callbacks */
+	if (smp_processor_id() != cpu->cpu)
+		return;
+
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
 		cpu->iowait_boost = int_tofp(1);
 	} else if (cpu->iowait_boost) {
include/linux/cpufreq.h
@@ -127,6 +127,15 @@ struct cpufreq_policy {
 	 */
 	unsigned int		transition_delay_us;
 
+	/*
+	 * Remote DVFS flag (Not added to the driver structure as we don't want
+	 * to access another structure from scheduler hotpath).
+	 *
+	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
+	 * different cpufreq policies.
+	 */
+	bool			dvfs_possible_from_any_cpu;
+
 	 /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
 	unsigned int cached_target_freq;
 	int cached_resolved_idx;
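The cpufreq-dt hunk at the top of this diff is the first user of the new flag. As a sketch, any driver for hardware on which one CPU can run DVFS for any other policy would opt in the same way from its ->init() callback (driver and function names hypothetical):

	static int example_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* ... usual policy setup ... */

		/* Any CPU may change the frequency of any policy here. */
		policy->dvfs_possible_from_any_cpu = true;
		return 0;
	}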
@@ -562,6 +571,17 @@ struct governor_attr {
 			 size_t count);
 };
 
+static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
+{
+	/*
+	 * Allow remote callbacks if:
+	 * - dvfs_possible_from_any_cpu flag is set
+	 * - the local and remote CPUs share cpufreq policy
+	 */
+	return policy->dvfs_possible_from_any_cpu ||
+		cpumask_test_cpu(smp_processor_id(), policy->cpus);
+}
+
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/
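To make the helper's semantics concrete, assume a hypothetical system with policy A spanning CPUs 0-3 and policy B spanning CPUs 4-7. For an update callback targeting policy B:

	callback runs on   dvfs_possible_from_any_cpu   result
	CPU 5 (in B)       false                        allowed (local to the policy)
	CPU 2 (in A)       false                        rejected (remote, not permitted)
	CPU 2 (in A)       true                         allowed (remote DVFS permitted)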
kernel/sched/cpufreq_schedutil.c
@@ -52,9 +52,11 @@ struct sugov_policy {
 struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
+	unsigned int cpu;
 
-	unsigned long iowait_boost;
-	unsigned long iowait_boost_max;
+	bool iowait_boost_pending;
+	unsigned int iowait_boost;
+	unsigned int iowait_boost_max;
 	u64 last_update;
 
 	/* The fields below are only needed when sharing a policy. */
@@ -76,6 +78,26 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 {
 	s64 delta_ns;
 
+	/*
+	 * Since cpufreq_update_util() is called with rq->lock held for
+	 * the @target_cpu, our per-cpu data is fully serialized.
+	 *
+	 * However, drivers cannot in general deal with cross-cpu
+	 * requests, so while get_next_freq() will work, our
+	 * sugov_update_commit() call may not for the fast switching platforms.
+	 *
+	 * Hence stop here for remote requests if they aren't supported
+	 * by the hardware, as calculating the frequency is pointless if
+	 * we cannot in fact act on it.
+	 *
+	 * For the slow switching platforms, the kthread is always scheduled on
+	 * the right set of CPUs and any CPU can find the next frequency and
+	 * schedule the kthread.
+	 */
+	if (sg_policy->policy->fast_switch_enabled &&
+	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
+		return false;
+
 	if (sg_policy->work_in_progress)
 		return false;
@@ -106,7 +128,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 	if (policy->fast_switch_enabled) {
 		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (next_freq == CPUFREQ_ENTRY_INVALID)
+		if (!next_freq)
 			return;
 
 		policy->cur = next_freq;
@@ -154,12 +176,12 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long cfs_max;
 
-	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
 
 	*util = min(rq->cfs.avg.util_avg, cfs_max);
 	*max = cfs_max;
@@ -169,30 +191,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 				   unsigned int flags)
 {
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		if (sg_cpu->iowait_boost_pending)
+			return;
+
+		sg_cpu->iowait_boost_pending = true;
+
+		if (sg_cpu->iowait_boost) {
+			sg_cpu->iowait_boost <<= 1;
+			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		} else {
+			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+		}
 	} else if (sg_cpu->iowait_boost) {
 		s64 delta_ns = time - sg_cpu->last_update;
 
 		/* Clear iowait_boost if the CPU apprears to have been idle. */
-		if (delta_ns > TICK_NSEC)
+		if (delta_ns > TICK_NSEC) {
 			sg_cpu->iowait_boost = 0;
+			sg_cpu->iowait_boost_pending = false;
+		}
 	}
 }
 
 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 			       unsigned long *max)
 {
-	unsigned long boost_util = sg_cpu->iowait_boost;
-	unsigned long boost_max = sg_cpu->iowait_boost_max;
+	unsigned int boost_util, boost_max;
 
-	if (!boost_util)
+	if (!sg_cpu->iowait_boost)
 		return;
 
+	if (sg_cpu->iowait_boost_pending) {
+		sg_cpu->iowait_boost_pending = false;
+	} else {
+		sg_cpu->iowait_boost >>= 1;
+		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+			sg_cpu->iowait_boost = 0;
+			return;
+		}
+	}
+
+	boost_util = sg_cpu->iowait_boost;
+	boost_max = sg_cpu->iowait_boost_max;
+
 	if (*util * boost_max < *max * boost_util) {
 		*util = boost_util;
 		*max = boost_max;
 	}
-	sg_cpu->iowait_boost >>= 1;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
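The rework above makes the boost ramp up gradually, doubling from policy->min on each consecutive iowait wakeup and capping at iowait_boost_max, then decay by halving once iowait wakeups stop, instead of jumping straight to the maximum on the first wakeup. A small standalone simulation of that ramp (illustrative frequencies only; the iowait_boost_pending handshake is omitted for brevity):

	#include <stdio.h>

	int main(void)
	{
		unsigned int policy_min = 400000;	/* kHz, assumed */
		unsigned int boost_max = 2000000;	/* kHz, assumed */
		unsigned int boost = 0;

		/* Consecutive iowait wakeups: start at policy->min, double, cap. */
		for (int wakeup = 1; wakeup <= 4; wakeup++) {
			if (boost) {
				boost <<= 1;
				if (boost > boost_max)
					boost = boost_max;
			} else {
				boost = policy_min;
			}
			printf("wakeup %d: boost = %u kHz\n", wakeup, boost);
		}

		/* No more iowait wakeups: halve per update, drop below min. */
		while (boost) {
			boost >>= 1;
			if (boost < policy_min)
				boost = 0;
			printf("decay: boost = %u kHz\n", boost);
		}
		return 0;
	}

This prints 400000, 800000, 1600000, 2000000 kHz on the way up, then 1000000, 500000, 0 on the way down.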
@@ -229,7 +275,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(&util, &max, sg_cpu->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 		/*
@@ -264,6 +310,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		delta_ns = time - j_sg_cpu->last_update;
 		if (delta_ns > TICK_NSEC) {
 			j_sg_cpu->iowait_boost = 0;
+			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
@@ -290,7 +337,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	unsigned long util, max;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
+	sugov_get_util(&util, &max, sg_cpu->cpu);
 
 	raw_spin_lock(&sg_policy->update_lock);
 
@@ -445,7 +492,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 	}
 
 	sg_policy->thread = thread;
-	kthread_bind_mask(thread, policy->related_cpus);
+
+	/* Kthread is bound to all CPUs by default */
+	if (!policy->dvfs_possible_from_any_cpu)
+		kthread_bind_mask(thread, policy->related_cpus);
+
 	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
 	mutex_init(&sg_policy->work_lock);
@@ -663,6 +714,11 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int __init sugov_register(void)
 {
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(sugov_cpu, cpu).cpu = cpu;
+
 	return cpufreq_register_governor(&schedutil_gov);
 }
 fs_initcall(sugov_register);
kernel/sched/deadline.c
@@ -1136,7 +1136,7 @@ static void update_curr_dl(struct rq *rq)
 	}
 
 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
-	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
+	cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
kernel/sched/fair.c
@@ -3278,7 +3278,9 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
 
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
-	if (&this_rq()->cfs == cfs_rq) {
+	struct rq *rq = rq_of(cfs_rq);
+
+	if (&rq->cfs == cfs_rq) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3295,7 +3297,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq_of(cfs_rq), 0);
+		cpufreq_update_util(rq, 0);
 	}
 }
 
@@ -4875,7 +4877,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	 * passed.
 	 */
 	if (p->in_iowait)
-		cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
+		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
kernel/sched/rt.c
@@ -970,7 +970,7 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+	cpufreq_update_util(rq, SCHED_CPUFREQ_RT);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
kernel/sched/sched.h
@@ -2070,19 +2070,13 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 {
 	struct update_util_data *data;
 
-	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+						  cpu_of(rq)));
 	if (data)
 		data->func(data, rq_clock(rq), flags);
 }
-
-static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
-{
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_update_util(rq, flags);
-}
 #else
 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
-static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef arch_scale_freq_capacity
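Taken together with the deadline.c, rt.c, and fair.c hunks above, this is what enables a remote kick: cpufreq_update_util() now dereferences the hook registered for the runqueue's own CPU rather than the local CPU's, so a CPU updating another CPU's runqueue invokes that CPU's governor callback directly. A sketch of the resulting flow, using the names from this diff (not a literal excerpt from any file):

	/* On CPU 0, holding cpu_rq(1)->lock, e.g. during a remote enqueue: */
	struct rq *rq = cpu_rq(1);

	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
	/*
	 * -> looks up CPU 1's update_util hook via per_cpu_ptr(..., cpu_of(rq))
	 * -> the governor callback (e.g. sugov_update_single()) runs on CPU 0
	 *    and uses cpufreq_can_do_remote_dvfs() to decide whether it may
	 *    act on CPU 1's behalf.
	 */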