stop_machine: Use smpboot threads

Use the smpboot thread infrastructure. Mark the stopper thread
selfparking and park it after it has finished the take_cpu_down()
work.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.686315164@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

parent 860a0ffaa3
commit 14e568e78f
2 changed files with 55 additions and 89 deletions
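
For orientation before the diff: the patch moves the stopper over to the
generic smpboot per-cpu thread API, replacing a hand-rolled CPU hotplug
notifier. Below is a minimal sketch of that registration pattern, assuming
the v3.9-era <linux/smpboot.h> interface; the demo_* names are hypothetical
and only illustrate the descriptor fields the patch fills in for the stopper.

	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, demo_task);
	static DEFINE_PER_CPU(unsigned int, demo_pending);

	static int demo_should_run(unsigned int cpu)
	{
		/* Called with preemption disabled; non-zero wakes thread_fn. */
		return per_cpu(demo_pending, cpu) != 0;
	}

	static void demo_thread_fn(unsigned int cpu)
	{
		/* Runs in the bound per-cpu thread whenever there is work. */
		per_cpu(demo_pending, cpu) = 0;
	}

	static struct smp_hotplug_thread demo_threads = {
		.store			= &demo_task,
		.thread_should_run	= demo_should_run,
		.thread_fn		= demo_thread_fn,
		.thread_comm		= "demo/%u",
	};

	static int __init demo_init(void)
	{
		/* Spawns one thread per online CPU and wires creation,
		 * parking and unparking into CPU hotplug. */
		return smpboot_register_percpu_thread(&demo_threads);
	}
	early_initcall(demo_init);

The stopper's descriptor additionally sets .create, .setup, .park, .unpark
and .selfparking, as the kernel/stop_machine.c diff below shows.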

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -254,6 +254,8 @@ static int __ref take_cpu_down(void *_param)
 		return err;
 
 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Park the stopper thread */
+	kthread_park(current);
 	return 0;
 }
 
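Note on the hunk above: because the stopper is marked .selfparking below,
the smpboot core does not park it when a CPU goes down; the thread instead
parks itself here, as the last task running on the dying CPU, once
take_cpu_down() has finished. A rough sketch of the core's park step, an
assumption based on the v3.9 kernel/smpboot.c rather than a quote of it:

	/* Sketch: on cpu-down the smpboot core parks each per-cpu
	 * thread, except selfparking ones such as the stopper. */
	static int smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
	{
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk && !ht->selfparking)
			kthread_park(tsk);
		return 0;
	}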

--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -18,7 +18,7 @@
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
+#include <linux/smpboot.h>
 #include <linux/atomic.h>
 
 /*
@@ -245,20 +245,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
 {
-	struct cpu_stopper *stopper = data;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	unsigned long flags;
+	int run;
+
+	spin_lock_irqsave(&stopper->lock, flags);
+	run = !list_empty(&stopper->works);
+	spin_unlock_irqrestore(&stopper->lock, flags);
+	return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
 	int ret;
 
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
-
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		return 0;
-	}
-
 	work = NULL;
 	spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
@@ -274,8 +279,6 @@ static int cpu_stopper_thread(void *data)
 		struct cpu_stop_done *done = work->done;
 		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-		__set_current_state(TASK_RUNNING);
-
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
@@ -291,87 +294,55 @@ static int cpu_stopper_thread(void *data)
 					  ksym_buf), arg);
 
 		cpu_stop_signal_done(done, true);
-	} else
-		schedule();
-
-	goto repeat;
+		goto repeat;
+	}
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-					   unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
-		p = kthread_create_on_node(cpu_stopper_thread,
-					   stopper,
-					   cpu_to_node(cpu),
-					   "migration/%d", cpu);
-		if (IS_ERR(p))
-			return notifier_from_errno(PTR_ERR(p));
-		get_task_struct(p);
-		kthread_bind(p, cpu);
-		sched_set_stop_task(cpu, p);
-		per_cpu(cpu_stopper_task, cpu) = p;
-		break;
-
-	case CPU_ONLINE:
-		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(p);
-		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
-		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-	{
-		struct cpu_stop_work *work;
+	struct cpu_stop_work *work;
+	unsigned long flags;
 
-		sched_set_stop_task(cpu, NULL);
-		/* kill the stopper */
-		kthread_stop(p);
-		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
-		list_for_each_entry(work, &stopper->works, list)
-			cpu_stop_signal_done(work->done, false);
-		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
-		/* release the stopper */
-		put_task_struct(p);
-		per_cpu(cpu_stopper_task, cpu) = NULL;
-		break;
-	}
-#endif
-	}
-
-	return NOTIFY_OK;
+	/* drain remaining works */
+	spin_lock_irqsave(&stopper->lock, flags);
+	list_for_each_entry(work, &stopper->works, list)
+		cpu_stop_signal_done(work->done, false);
+	stopper->enabled = false;
+	spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers.  It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-	.notifier_call	= cpu_stop_cpu_callback,
-	.priority	= 10,
+static void cpu_stop_unpark(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+	spin_lock_irq(&stopper->lock);
+	stopper->enabled = true;
+	spin_unlock_irq(&stopper->lock);
+}
+
+static struct smp_hotplug_thread cpu_stop_threads = {
+	.store			= &cpu_stopper_task,
+	.thread_should_run	= cpu_stop_should_run,
+	.thread_fn		= cpu_stopper_thread,
+	.thread_comm		= "migration/%u",
+	.create			= cpu_stop_create,
+	.setup			= cpu_stop_unpark,
+	.park			= cpu_stop_park,
+	.unpark			= cpu_stop_unpark,
+	.selfparking		= true,
 };
 
 static int __init cpu_stop_init(void)
 {
-	void *bcpu = (void *)(long)smp_processor_id();
 	unsigned int cpu;
-	int err;
 
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
-	/* start one for the boot cpu */
-	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-				    bcpu);
-	BUG_ON(err != NOTIFY_OK);
-	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-	register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
 	stop_machine_initialized = true;
-
 	return 0;
 }
 early_initcall(cpu_stop_init);