	x86: Use generic idle loop
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215235.486594473@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
parent aba92c9e2c
commit 7d1a941731
					 4 changed files with 31 additions and 81 deletions
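For orientation: GENERIC_IDLE_LOOP switches x86 over to the common idle loop in kernel/cpu/idle.c, which calls back into the arch_cpu_idle_*() hooks introduced below. The following is a simplified sketch of that loop, not verbatim kernel code; cpu_idle_force_poll, cpu_idle_poll() and the inlined loop structure are assumptions based on the generic-idle series, with the tracing and RCU bracketing omitted for brevity.

/*
 * Sketch: what the generic loop does with the arch hooks (simplified).
 * start_secondary()/cpu_bringup_and_idle() now enter here instead of
 * the old per-arch cpu_idle().
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();			/* x86: stack canary setup */

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* x86: play_dead() */

			local_irq_disable();
			arch_cpu_idle_enter();		/* x86: NMI touch, enter_idle() */

			if (cpu_idle_force_poll)	/* set by cpu_idle_poll_ctrl(true) */
				cpu_idle_poll();	/* generic idle=poll loop */
			else
				arch_cpu_idle();	/* x86: cpuidle or x86_idle() */

			arch_cpu_idle_exit();		/* x86: __exit_idle() */
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}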
				
			
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
@@ -97,6 +97,7 @@ config X86
 	select GENERIC_IOMAP
 	select DCACHE_WORD_ACCESS
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_LOOP
 	select ARCH_WANT_IPC_PARSE_VERSION if X86_32
 	select HAVE_ARCH_SECCOMP_FILTER
 	select BUILDTIME_EXTABLE_SORT
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
+}
 
-	while (1) {
-		tick_nohz_idle_enter();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}
 
-		while (!need_resched()) {
-			rmb();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}
 
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
-			if (cpuidle_idle_call())
-				x86_idle();
-
-			rcu_idle_exit();
-			start_critical_timings();
-
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
-
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
 }
 
 /*
- * We use this if we don't have any better
- * idle routine..
+ * Called from the generic idle code.
  */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
+}
+
+/*
+ * We use this if we don't have any better idle routine..
+ */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
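Note on the default_idle() change above: the TS_POLLING set/clear and the need_resched() re-check did not disappear, they moved into the generic loop, which performs the polling handshake once around the call into the arch idle routine. A rough sketch of that bracket, with helper names assumed from the generic code rather than quoted from it:

	/*
	 * Sketch: generic replacement for the TS_POLLING dance that
	 * default_idle() used to carry. The ordering requirement is the
	 * one the removed x86 comment described: the cleared polling
	 * state must be visible before NEED_RESCHED is re-tested.
	 */
	current_clr_polling();		/* "halting now, send me the IPI" */
	smp_mb();			/* make the clear visible first */
	if (!need_resched())
		arch_cpu_idle();	/* default_idle(): bare safe_halt() */
	else
		local_irq_enable();
	current_set_polling();

That is why the new default_idle() shrinks to a safe_halt() between its two trace points.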
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 		halt();
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;
 
 	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
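The deleted poll_idle() is not lost either: "idle=poll" now calls cpu_idle_poll_ctrl(true) (see the idle_setup() hunk above), which makes the generic loop spin instead of calling arch_cpu_idle(). A sketch of that generic polling loop, assuming the helper from the same series rather than quoting it:

/*
 * Sketch (assumed, simplified): generic counterpart of the removed
 * x86 poll_idle(), selected via cpu_idle_poll_ctrl(true).
 */
static int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();		/* same spin the x86 version had */
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

With the function itself gone, select_idle_routine() keys its hyperthreading warning off boot_option_idle_override == IDLE_POLL instead of comparing x86_idle against poll_idle.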
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	x86_cpuinit.setup_percpu_clockev();
 
 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_store_boot_cpu_info(void)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
 static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 static int xen_smp_intr_init(unsigned int cpu)
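Taken together, the conversion recipe visible here is deliberately small: select GENERIC_IDLE_LOOP, swap the cpu_idle() entry points for cpu_startup_entry(CPUHP_ONLINE), and override only the hooks the architecture cares about. The generic side is expected to provide weak fallbacks so partial conversions still work; sketched below as an assumption about the generic code, not verbatim:

/*
 * Sketch: presumed weak fallbacks on the generic side; x86 overrides
 * all five hooks in this commit.
 */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }

void __weak arch_cpu_idle(void)
{
	/* no arch low-power instruction known: fall back to polling */
	cpu_idle_force_poll = 1;
	local_irq_enable();
}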