mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00
	sched/deadline: Check bandwidth overflow earlier for hotplug
Currently we check for bandwidth overflow potentially due to hotplug operations at the end of sched_cpu_deactivate(), after the cpu going offline has already been removed from scheduling, active_mask, etc. This can create issues for DEADLINE tasks, as there is a substantial race window between the start of sched_cpu_deactivate() and the moment we possibly decide to roll back the operation if dl_bw_deactivate() returns failure in cpuset_cpu_inactive(). An example is a throttled task that sees its replenishment timer fire while the cpu it was previously running on is already considered offline, but before dl_bw_deactivate() has had a chance to say no and the roll-back has happened.

Fix this by calling dl_bw_deactivate() first thing in sched_cpu_deactivate(), and by doing the required calculation in dl_bw_deactivate() while treating the cpu passed as an argument as already offline.

By doing so we also simplify sched_cpu_deactivate(), as there is no longer any need to roll back if we fail early.

Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Tested-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/Zzc1DfPhbvqDDIJR@jlelli-thinkpadt14gen4.remote.csb
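For illustration, the "do the math considering the cpu off already" step in the deadline.c hunk of the diff below boils down to: subtract the departing CPU's capacity, require that at least one CPU remains for DEADLINE tasks, and re-run the admission test against the reduced capacity. The following is a minimal, self-contained user-space sketch of that arithmetic; the function and variable names and the simplified admission test are made up for this sketch and are not the kernel's internal API (the real code uses __dl_overflow(), dl_bw_cpus() and arch_scale_cpu_capacity()).

/*
 * Illustrative only: a user-space sketch of the "treat the CPU as already
 * off" admission test.  All names and the simplified check are made up for
 * this example; they are not the kernel's internal API.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Would the reserved DEADLINE bandwidth still fit if a CPU contributing
 * 'leaving_capacity' went away?  Capacity and bandwidth are expressed in
 * the same abstract units here to keep the arithmetic obvious.
 */
static bool dl_overflow_after_removal(unsigned long long total_capacity,
				      unsigned long long leaving_capacity,
				      unsigned long long reserved_bw,
				      unsigned int online_cpus)
{
	/* Mirrors "cap -= arch_scale_cpu_capacity(cpu)" in the patch. */
	unsigned long long cap = total_capacity - leaving_capacity;

	/*
	 * Leave at least one CPU for DEADLINE tasks: mirrors the
	 * "if (dl_bw_cpus(cpu) - 1)" check in the patch.
	 */
	if (online_cpus - 1 == 0)
		return true;

	/* Overflow if the remaining capacity cannot hold the reservation. */
	return reserved_bw > cap;
}

int main(void)
{
	/*
	 * Four CPUs of capacity 1024 each and 3500 units of reserved
	 * DEADLINE bandwidth: removing one CPU leaves 3072 < 3500, so the
	 * hot-unplug must be refused.
	 */
	printf("overflow: %d\n",
	       dl_overflow_after_removal(4 * 1024ULL, 1024ULL, 3500ULL, 4));
	return 0;
}

With this ordering, a refused check maps to sched_cpu_deactivate() returning an error before the CPU is taken out of scheduling, which is why no roll-back path is needed anymore.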
parent d4742f6ed7
commit 53916d5fd3

2 changed files with 17 additions and 17 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8182,19 +8182,14 @@ static void cpuset_cpu_active(void)
 	cpuset_update_active_cpus();
 }
 
-static int cpuset_cpu_inactive(unsigned int cpu)
+static void cpuset_cpu_inactive(unsigned int cpu)
 {
 	if (!cpuhp_tasks_frozen) {
-		int ret = dl_bw_deactivate(cpu);
-
-		if (ret)
-			return ret;
 		cpuset_update_active_cpus();
 	} else {
 		num_cpus_frozen++;
 		partition_sched_domains(1, NULL, NULL);
 	}
-	return 0;
 }
 
 static inline void sched_smt_present_inc(int cpu)
@@ -8256,6 +8251,11 @@ int sched_cpu_deactivate(unsigned int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	int ret;
 
+	ret = dl_bw_deactivate(cpu);
+
+	if (ret)
+		return ret;
+
 	/*
 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
 	 * load balancing when not active
@@ -8301,15 +8301,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 		return 0;
 
 	sched_update_numa(cpu, false);
-	ret = cpuset_cpu_inactive(cpu);
-	if (ret) {
-		sched_smt_present_inc(cpu);
-		sched_set_rq_online(rq, cpu);
-		balance_push_set(cpu, false);
-		set_cpu_active(cpu, true);
-		sched_update_numa(cpu, true);
-		return ret;
-	}
+	cpuset_cpu_inactive(cpu);
 	sched_domains_numa_masks_clear(cpu);
 	return 0;
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -3495,6 +3495,13 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 		}
 		break;
 	case dl_bw_req_deactivate:
+		/*
+		 * cpu is not off yet, but we need to do the math by
+		 * considering it off already (i.e., what would happen if we
+		 * turn cpu off?).
+		 */
+		cap -= arch_scale_cpu_capacity(cpu);
+
 		/*
 		 * cpu is going offline and NORMAL tasks will be moved away
 		 * from it. We can thus discount dl_server bandwidth
@@ -3512,9 +3519,10 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 		if (dl_b->total_bw - fair_server_bw > 0) {
 			/*
 			 * Leaving at least one CPU for DEADLINE tasks seems a
-			 * wise thing to do.
+			 * wise thing to do. As said above, cpu is not offline
+			 * yet, so account for that.
 			 */
-			if (dl_bw_cpus(cpu))
+			if (dl_bw_cpus(cpu) - 1)
 				overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
 			else
 				overflow = 1;