mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
	sched/hotplug: Move sync_rcu to be with set_cpu_active(false)
The sync_rcu stuff is specifically for clearing bits in the active mask,
such that everybody will observe the bit cleared and will not consider
the cleared CPU for load-balancing etc.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160310120025.169219710@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
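In other words, this is the usual RCU retract-then-wait sequence: clear the
bit, then wait out a grace period so that every reader who might still have
seen the bit set has finished; afterwards no new user can pick the CPU. A
minimal kernel-style sketch of that pattern follows; retract_cpu() and
cpu_usable_for_balance() are hypothetical helper names for illustration
only, the real code being the diff below:

	/* Hypothetical sketch of the pattern; the real code is in the diff. */
	static void retract_cpu(unsigned int cpu)
	{
		set_cpu_active(cpu, false);	/* publish: active bit now clear */

		/*
		 * Readers run either preempt-disabled or under rcu_read_lock().
		 * Under CONFIG_PREEMPT, sync_rcu() does not imply sync_sched(),
		 * so wait for both grace periods; otherwise one wait suffices.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT))
			synchronize_rcu_mult(call_rcu, call_rcu_sched);
		else
			synchronize_rcu();

		/* From here on, nobody still considers @cpu for load-balancing. */
	}

	/* Hypothetical reader that the grace period orders against. */
	static bool cpu_usable_for_balance(unsigned int cpu)
	{
		bool active;

		rcu_read_lock();
		active = cpu_active(cpu);	/* may still see the old value... */
		rcu_read_unlock();		/* ...but finishes before the sync returns */

		return active;
	}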
parent 40190a78f8
commit b2454caa89

2 changed files with 14 additions and 15 deletions
 kernel/cpu.c        | 15 ---------------
 kernel/sched/core.c | 14 ++++++++++++++
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu)
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7112,6 +7112,20 @@ int sched_cpu_deactivate(unsigned int cpu)
 	int ret;
 
 	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+	 * not imply sync_sched(), so wait for both.
+	 *
+	 * Do sync before park smpboot threads to take care the rcu boost case.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
 
 	if (!sched_smp_initialized)
 		return 0;
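The same ordering can be observed outside the kernel. Below is a toy
userspace analogue using liburcu's default flavor, assuming liburcu is
installed (build with gcc demo.c -lurcu -lpthread); the bool active merely
stands in for one CPU's bit in cpu_active_mask, and everything here is
illustrative, not kernel code:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu(), ... */
	#include <urcu/system.h>	/* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */

	static bool active = true;	/* stand-in for the CPU's active bit */

	static void *reader(void *arg)
	{
		(void)arg;
		rcu_register_thread();

		rcu_read_lock();
		/*
		 * May still see the old value, but this read-side section
		 * must end before the writer's synchronize_rcu() can return.
		 */
		if (CMM_LOAD_SHARED(active))
			printf("reader: CPU still looks usable\n");
		rcu_read_unlock();

		rcu_unregister_thread();
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, reader, NULL);

		CMM_STORE_SHARED(active, false);	/* retract the "bit" */
		synchronize_rcu();		/* wait out pre-existing readers */
		printf("writer: no reader can still be using this CPU\n");

		pthread_join(t, NULL);
		return 0;
	}

Whether the reader prints depends on scheduling, but if it does observe the
old value, the writer's synchronize_rcu() cannot return until that read-side
section has ended, which is exactly the guarantee the commit relies on right
after set_cpu_active(cpu, false).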