	sched/core: Fix illegal RCU from offline CPUs
In the CPU-offline process, the dying CPU calls mmdrop() after idle entry and
the subsequent call to cpuhp_report_idle_dead(). Once execution passes the
call to rcu_report_dead(), RCU is ignoring the CPU, which results in lockdep
complaining when mmdrop() uses RCU from either memcg or debugobjects below.

Fix it by cleaning up the active_mm state from the BP instead. Every arch
which has CONFIG_HOTPLUG_CPU should have already called idle_task_exit() from
the AP. The only exception is parisc because it switches them to &init_mm
unconditionally (see smp_boot_one_cpu() and smp_cpu_init()), but the patch
will still work there because it calls mmgrab(&init_mm) in smp_cpu_init() and
then should call mmdrop(&init_mm) in finish_cpu().

  WARNING: suspicious RCU usage
  -----------------------------
  kernel/workqueue.c:710 RCU or wq_pool_mutex should be held!

  other info that might help us debug this:

  RCU used illegally from offline CPU!

  Call Trace:
   dump_stack+0xf4/0x164 (unreliable)
   lockdep_rcu_suspicious+0x140/0x164
   get_work_pool+0x110/0x150
   __queue_work+0x1bc/0xca0
   queue_work_on+0x114/0x120
   css_release+0x9c/0xc0
   percpu_ref_put_many+0x204/0x230
   free_pcp_prepare+0x264/0x570
   free_unref_page+0x38/0xf0
   __mmdrop+0x21c/0x2c0
   idle_task_exit+0x170/0x1b0
   pnv_smp_cpu_kill_self+0x38/0x2e0
   cpu_die+0x48/0x64
   arch_cpu_idle_dead+0x30/0x50
   do_idle+0x2f4/0x470
   cpu_startup_entry+0x38/0x40
   start_secondary+0x7a8/0xa80
   start_secondary_resume+0x10/0x14

Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Link: https://lkml.kernel.org/r/20200401214033.8448-1-cai@lca.pw
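The ordering the fix relies on can be shown with a small sketch. This is an
illustration only, not the kernel implementation: struct mm, struct idle_task
and the calls in main() are simplified userspace stand-ins for mm_struct, the
per-CPU idle task and the hotplug sequence. It demonstrates the same point as
the diff below: the dying CPU no longer drops the lazy active_mm reference in
idle_task_exit(); the boot processor performs the final mmdrop() from
finish_cpu(), where RCU is still watching.

/*
 * Illustration only -- a minimal userspace sketch, not kernel code.
 * struct mm, struct idle_task and the flow in main() are simplified
 * stand-ins for mm_struct, the idle task and the hotplug sequence.
 */
#include <stdatomic.h>
#include <stdio.h>

struct mm {
	atomic_int mm_count;            /* stand-in for mm_struct::mm_count */
};

static struct mm init_mm = { .mm_count = 1 };

static void mmdrop(struct mm *mm)
{
	/* Last reference gone: the real __mmdrop() may reach into RCU/memcg. */
	if (atomic_fetch_sub(&mm->mm_count, 1) == 1)
		printf("freeing mm %p (RCU must be watching this CPU)\n", (void *)mm);
}

struct idle_task {
	struct mm *active_mm;           /* lazy reference held by the idle task */
};

/* Runs on the dying CPU, possibly after rcu_report_dead(). */
static void idle_task_exit(struct idle_task *idle)
{
	if (idle->active_mm != &init_mm) {
		/*
		 * In the kernel, switch_mm(mm, &init_mm, current) happens here,
		 * but the lazy reference stays in idle->active_mm and is
		 * deliberately NOT dropped: RCU may already be ignoring this CPU.
		 */
	}
}

/* Runs later on the boot processor, where RCU is still watching. */
static int finish_cpu(struct idle_task *idle)
{
	struct mm *mm = idle->active_mm;

	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);                     /* safe: this CPU is online */
	return 0;
}

int main(void)
{
	struct mm user_mm = { .mm_count = 1 };            /* the lazily-held mm */
	struct idle_task idle = { .active_mm = &user_mm };

	idle_task_exit(&idle);  /* dying CPU: no mmdrop() here any more */
	finish_cpu(&idle);      /* BP: the deferred mmdrop() happens here */
	return 0;
}

In the actual patch, registering finish_cpu() as the CPUHP_BRINGUP_CPU
teardown callback (see the kernel/cpu.c hunks below) is what guarantees the
final mmdrop() runs on a CPU that is still online.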
parent f38f12d1e0
commit bf2c59fce4

4 changed files with 22 additions and 4 deletions
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void)
 	/* Standard hot unplug procedure */
 
 	idle_task_exit();
-	current->active_mm = NULL; /* for sanity */
 	cpu = smp_processor_id();
 	DBG("CPU%d offline\n", cpu);
 	generic_set_cpu_dead(cpu);
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm)
 		__mmdrop(mm);
 }
 
+void mmdrop(struct mm_struct *mm);
+
 /*
  * This has to be called after a get_task_mm()/mmget_not_zero()
  * followed by taking the mmap_sem for writing before modifying the

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3,6 +3,7 @@
  *
  * This code is licenced under the GPL.
  */
+#include <linux/sched/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu)
 	return bringup_wait_for_ap(cpu);
 }
 
+static int finish_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	struct mm_struct *mm = idle->active_mm;
+
+	/*
+	 * idle_task_exit() will have switched to &init_mm, now
+	 * clean up any remaining active_mm state.
+	 */
+	if (mm != &init_mm)
+		idle->active_mm = &init_mm;
+	mmdrop(mm);
+	return 0;
+}
+
 /*
  * Hotplug state machine related functions
  */
@@ -1549,7 +1565,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
 		.startup.single		= bringup_cpu,
-		.teardown.single	= NULL,
+		.teardown.single	= finish_cpu,
 		.cant_stop		= true,
 	},
 	/* Final state before CPU kills itself */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6197,13 +6197,14 @@ void idle_task_exit(void)
 	struct mm_struct *mm = current->active_mm;
 
 	BUG_ON(cpu_online(smp_processor_id()));
+	BUG_ON(current != this_rq()->idle);
 
 	if (mm != &init_mm) {
 		switch_mm(mm, &init_mm, current);
-		current->active_mm = &init_mm;
 		finish_arch_post_lock_switch();
 	}
-	mmdrop(mm);
+
+	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
 }
 
 /*