Mirror of https://github.com/torvalds/linux.git — synced 2025-10-31 16:48:26 +02:00
	rcu: Make CPU_DYING_IDLE an explicit call
Make the RCU CPU_DYING_IDLE callback an explicit function call, so it
gets invoked at the proper place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182341.870167933@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:

parent e69aab1311
commit 27d50c7eeb

6 changed files with 42 additions and 41 deletions
		|  | @ -101,9 +101,7 @@ enum { | |||
| 					* Called on the new cpu, just before | ||||
| 					* enabling interrupts. Must not sleep, | ||||
| 					* must not fail */ | ||||
| #define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached | ||||
| 					* idle loop. */ | ||||
| #define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly, | ||||
| #define CPU_BROKEN		0x000B /* CPU (unsigned)v did not die properly, | ||||
| 					* perhaps due to preemption. */ | ||||
| 
 | ||||
| /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 | ||||
|  |  | |||
|  | @ -47,6 +47,8 @@ | |||
|  * runtime initialization. | ||||
|  */ | ||||
| 
 | ||||
| struct notifier_block; | ||||
| 
 | ||||
| typedef	int (*notifier_fn_t)(struct notifier_block *nb, | ||||
| 			unsigned long action, void *data); | ||||
| 
 | ||||
|  |  | |||
|  | @ -332,9 +332,7 @@ void rcu_init(void); | |||
| void rcu_sched_qs(void); | ||||
| void rcu_bh_qs(void); | ||||
| void rcu_check_callbacks(int user); | ||||
| struct notifier_block; | ||||
| int rcu_cpu_notify(struct notifier_block *self, | ||||
| 		   unsigned long action, void *hcpu); | ||||
| void rcu_report_dead(unsigned int cpu); | ||||
| 
 | ||||
| #ifndef CONFIG_TINY_RCU | ||||
| void rcu_end_inkernel_boot(void); | ||||
|  |  | |||
|  | @ -762,6 +762,7 @@ void cpuhp_report_idle_dead(void) | |||
| 	BUG_ON(st->state != CPUHP_AP_OFFLINE); | ||||
| 	st->state = CPUHP_AP_IDLE_DEAD; | ||||
| 	complete(&st->done); | ||||
| 	rcu_report_dead(smp_processor_id()); | ||||
| } | ||||
| 
 | ||||
| #else | ||||
|  |  | |||
|  | @ -2606,28 +2606,6 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * The CPU is exiting the idle loop into the arch_cpu_idle_dead() | ||||
|  * function.  We now remove it from the rcu_node tree's ->qsmaskinit | ||||
|  * bit masks. | ||||
|  */ | ||||
| static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	unsigned long mask; | ||||
| 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | ||||
| 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */ | ||||
| 
 | ||||
| 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) | ||||
| 		return; | ||||
| 
 | ||||
| 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */ | ||||
| 	mask = rdp->grpmask; | ||||
| 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ | ||||
| 	rnp->qsmaskinitnext &= ~mask; | ||||
| 	raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * The CPU has been completely removed, and some other CPU is reporting | ||||
|  * this fact from process context.  Do the remainder of the cleanup, | ||||
|  | @ -4247,6 +4225,43 @@ static void rcu_prepare_cpu(int cpu) | |||
| 		rcu_init_percpu_data(cpu, rsp); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_HOTPLUG_CPU | ||||
| /*
 | ||||
|  * The CPU is exiting the idle loop into the arch_cpu_idle_dead() | ||||
|  * function.  We now remove it from the rcu_node tree's ->qsmaskinit | ||||
|  * bit masks. | ||||
|  */ | ||||
| static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	unsigned long mask; | ||||
| 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | ||||
| 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */ | ||||
| 
 | ||||
| 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) | ||||
| 		return; | ||||
| 
 | ||||
| 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */ | ||||
| 	mask = rdp->grpmask; | ||||
| 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ | ||||
| 	rnp->qsmaskinitnext &= ~mask; | ||||
| 	raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||||
| } | ||||
| 
 | ||||
| void rcu_report_dead(unsigned int cpu) | ||||
| { | ||||
| 	struct rcu_state *rsp; | ||||
| 
 | ||||
| 	/* QS for any half-done expedited RCU-sched GP. */ | ||||
| 	preempt_disable(); | ||||
| 	rcu_report_exp_rdp(&rcu_sched_state, | ||||
| 			   this_cpu_ptr(rcu_sched_state.rda), true); | ||||
| 	preempt_enable(); | ||||
| 	for_each_rcu_flavor(rsp) | ||||
| 		rcu_cleanup_dying_idle_cpu(cpu, rsp); | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * Handle CPU online/offline notification events. | ||||
|  */ | ||||
|  | @ -4278,17 +4293,6 @@ int rcu_cpu_notify(struct notifier_block *self, | |||
| 		for_each_rcu_flavor(rsp) | ||||
| 			rcu_cleanup_dying_cpu(rsp); | ||||
| 		break; | ||||
| 	case CPU_DYING_IDLE: | ||||
| 		/* QS for any half-done expedited RCU-sched GP. */ | ||||
| 		preempt_disable(); | ||||
| 		rcu_report_exp_rdp(&rcu_sched_state, | ||||
| 				   this_cpu_ptr(rcu_sched_state.rda), true); | ||||
| 		preempt_enable(); | ||||
| 
 | ||||
| 		for_each_rcu_flavor(rsp) { | ||||
| 			rcu_cleanup_dying_idle_cpu(cpu, rsp); | ||||
| 		} | ||||
| 		break; | ||||
| 	case CPU_DEAD: | ||||
| 	case CPU_DEAD_FROZEN: | ||||
| 	case CPU_UP_CANCELED: | ||||
|  |  | |||
|  | @ -220,8 +220,6 @@ static void cpu_idle_loop(void) | |||
| 			rmb(); | ||||
| 
 | ||||
| 			if (cpu_is_offline(smp_processor_id())) { | ||||
| 				rcu_cpu_notify(NULL, CPU_DYING_IDLE, | ||||
| 					       (void *)(long)smp_processor_id()); | ||||
| 				cpuhp_report_idle_dead(); | ||||
| 				arch_cpu_idle_dead(); | ||||
| 			} | ||||
|  |  | |||
Loading…

Reference in a new issue

Author: Thomas Gleixner