	x86, acpi, idle: Restructure the mwait idle routines
People seem to delight in writing wrong and broken mwait idle routines;
collapse the lot.

This leaves mwait_play_dead() the sole remaining user of __mwait() and
new __mwait() users are probably doing it wrong.

Also remove __sti_mwait() as it is unused.

Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jacob Jun Pan <jacob.jun.pan@linux.intel.com>
Cc: Mike Galbraith <bitbucket@online.de>
Cc: Len Brown <lenb@kernel.org>
Cc: Rui Zhang <rui.zhang@intel.com>
Acked-by: Rafael Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131212141654.616820819@infradead.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:

parent 40e2d7f9b5
commit 1682425539

7 changed files with 43 additions and 78 deletions
arch/x86/include/asm/mwait.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
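For reference, the hint arguments are plain bit fields. Below is a minimal
userspace sketch of the encoding, reusing the mask and size constants from the
header above; the mwait_hint() helper and the C6/sub-state values are
illustrative only, and the EAX layout (bits 7:4 = target C-state minus one,
bits 3:0 = sub-state) follows the usual MONITOR/MWAIT extension convention
rather than anything this commit defines.

/* mwait_hint.c - illustrative sketch, not kernel code.
 * Constants copied from asm/mwait.h above. */
#include <stdio.h>

#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_ECX_INTERRUPT_BREAK	0x1

/* Pack a target C-state and sub-state into an EAX hint. */
static unsigned long mwait_hint(unsigned int cstate, unsigned int substate)
{
	return (((cstate - 1) & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}

int main(void)
{
	/* A kernel caller would hand this pair to mwait_idle_with_hints(). */
	unsigned long eax = mwait_hint(6, 2);	/* C6, sub-state 2 -> 0x52 */

	printf("eax = 0x%02lx ecx = 0x%x\n", eax, MWAIT_ECX_INTERRUPT_BREAK);
	return 0;
}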
arch/x86/include/asm/processor.h
@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
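Aside: the removed __sti_mwait() was the interrupts-enabled variant; the
"sti; mwait" pairing relies on STI's one-instruction interrupt shadow, so
interrupts are only recognized once MWAIT has armed, which closes the wakeup
race. It had no remaining callers, hence its deletion along with the two
duplicated helpers above.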
arch/x86/kernel/acpi/cstate.c
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
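The old cstate.c copy removed above ordered things with an explicit smp_mb()
between MONITOR and the need_resched() recheck; the new header version folds
that ordering into current_set_polling_and_test() instead, and also tells
wakers they need not send an IPI. A simplified model of that handshake, as
userspace C11 atomics with made-up names (the kernel's real flags live in
thread_info and the waker side is in the scheduler):

/* polling_model.c - simplified sketch, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

#define POLLING      (1u << 0)	/* "I watch my flags word; no IPI needed" */
#define NEED_RESCHED (1u << 1)

static _Atomic unsigned int thread_flags;

/* Idle side: advertise polling, then (fully ordered) recheck for work.
 * Returns true if a reschedule was already pending, in which case the
 * caller must not arm MONITOR/MWAIT at all. */
static bool set_polling_and_test(void)
{
	atomic_fetch_or(&thread_flags, POLLING); /* seq_cst RMW = full barrier */
	return atomic_load(&thread_flags) & NEED_RESCHED;
}

/* Waker side: returns true if an IPI is still required. A CPU that is
 * polling is woken by this very store hitting the monitored cache line. */
static bool wake_needs_ipi(void)
{
	unsigned int old = atomic_fetch_or(&thread_flags, NEED_RESCHED);
	return !(old & POLLING);
}

int main(void)
{
	if (!set_polling_and_test())
		; /* safe to arm MONITOR on &thread_flags here */
	return wake_needs_ipi() ? 1 : 0;	/* 0: IPI skipped */
}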
drivers/acpi/acpi_pad.c
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 			stop_critical_timings();
 
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			smp_mb();
-			if (!need_resched())
-				__mwait(power_saving_mwait_eax, 1);
+			mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 			start_critical_timings();
 			if (lapic_marked_unstable)
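Note the unchanged second argument at this call site: the literal 1 is
MWAIT_ECX_INTERRUPT_BREAK, so interrupts still act as break events; only the
open-coded monitor/barrier/recheck sequence is replaced by the common helper.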
drivers/acpi/processor_idle.c
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
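Dropping the three entry_method == ACPI_CSTATE_FFH blocks is safe because the
FFH paths reach MWAIT via acpi_idle_do_entry() ->
acpi_processor_ffh_cstate_enter() -> mwait_idle_with_hints(), which now
performs the polling set/test (and the clear on exit) itself.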
drivers/idle/intel_idle.c
@@ -375,16 +375,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	if (!current_set_polling_and_test()) {
-
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(eax, ecx);
-	}
+	mwait_idle_with_hints(eax, ecx);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
drivers/thermal/intel_powerclamp.c
@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
 			 */
 			local_touch_nmi();
 			stop_critical_timings();
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			cpu_relax(); /* allow HT sibling to run */
-			__mwait(eax, ecx);
+			mwait_idle_with_hints(eax, ecx);
 			start_critical_timings();
 			atomic_inc(&idle_wakeup_counter);
 		}
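The powerclamp copy was arguably the most broken of the lot: it armed MONITOR
and entered MWAIT without ever rechecking need_resched, and the intervening
cpu_relax() only widened that window. Routing it through
mwait_idle_with_hints() picks up both the polling-flag handshake and the
missing recheck; the explicit "allow HT sibling to run" yield is simply gone.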