[S390] mutex: Introduce arch_mutex_cpu_relax()
The spinning mutex implementation uses cpu_relax() in busy loops as a compiler barrier. Depending on the architecture, cpu_relax() may do more than needed in these specific mutex spin loops. On System z we also give up the time slice of the virtual cpu in cpu_relax(), which prevents effective spinning on the mutex.

This patch replaces cpu_relax() in the spinning mutex code with arch_mutex_cpu_relax(), which can be defined by each architecture that selects HAVE_ARCH_MUTEX_CPU_RELAX. The default is still cpu_relax(), so this patch should not affect architectures other than System z for now.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1290437256.7455.4.camel@thinkpad>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
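To make the opt-in mechanism concrete, here is a small self-contained sketch that can be compiled outside the kernel tree. The cpu_relax() macro, the CONFIG symbol handling, and the spin loop below are simplified stand-ins for the kernel code touched by this commit, not the actual implementations:

/* relax_sketch.c -- illustrative only; macros and loop are stand-ins
 * for the kernel code changed by this patch. */
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's cpu_relax(). On s390 the real cpu_relax()
 * also yields the virtual CPU's time slice, which defeats mutex spinning. */
#define cpu_relax()	__asm__ __volatile__("" ::: "memory")

/* The new hook: an architecture that selects HAVE_ARCH_MUTEX_CPU_RELAX
 * provides its own definition (s390 uses a plain compiler barrier);
 * everyone else keeps cpu_relax(), so their behaviour is unchanged. */
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax()	cpu_relax()
#endif

int main(void)
{
	atomic_int owner_running = 1;
	int spins = 0;

	/* Simplified stand-in for the mutex spin loop: busy-wait while the
	 * lock owner is still on a CPU, relaxing on every iteration. */
	while (atomic_load(&owner_running)) {
		arch_mutex_cpu_relax();
		if (++spins == 1000)
			atomic_store(&owner_running, 0); /* pretend the owner released the lock */
	}

	printf("spun %d times before acquiring\n", spins);
	return 0;
}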
commit 34b133f8e9
parent c03017544e

6 changed files with 13 additions and 2 deletions
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
+config HAVE_ARCH_MUTEX_CPU_RELAX
+	bool
+
 source "kernel/gcov/Kconfig"
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -87,6 +87,7 @@ config S390
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_GET_USER_PAGES_FAST
+	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,3 +7,5 @@
  */
 
 #include <asm-generic/mutex-dec.h>
+
+#define arch_mutex_cpu_relax()	barrier()
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -160,4 +160,8 @@ extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+#define arch_mutex_cpu_relax()	cpu_relax()
+#endif
+
 #endif
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -4214,7 +4215,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		if (task_thread_info(rq->curr) != owner || need_resched())
 			return 0;
 
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 
 	return 1;