tools/perf: Add required memory barriers
To match patch bf378d341e ("perf: Fix perf ring buffer memory
ordering"), change userspace to also adhere to the ordering outlined.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Neuling <mikey@neuling.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: james.hogan@imgtec.com
Cc: Vince Weaver <vince@deater.net>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Michael Ellerman <michael@ellerman.id.au>
Link: http://lkml.kernel.org/r/20131030104246.GH16117@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
			
			
parent 0a196848ca
commit a94d342b9c

3 changed files with 49 additions and 16 deletions
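
For reference, the ordering outlined by bf378d341e pairs the producer
(kernel) and consumer (userspace) barriers as follows; this patch supplies
the consumer half, rmb() at (C) and mb() at (D):

  kernel				user

  READ ->data_tail			READ ->data_head
  smp_mb()	(A)			smp_rmb()	(C)
  WRITE $data				READ $data
  smp_wmb()	(B)			smp_mb()	(D)
  STORE ->data_head			WRITE ->data_tail

where A pairs with D, and B pairs with C.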
				
			
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -4,6 +4,8 @@
 #include <asm/unistd.h>
 
 #if defined(__i386__)
+#define mb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC	"model name"
@@ -13,6 +15,8 @@
 #endif
 
 #if defined(__x86_64__)
+#define mb()		asm volatile("mfence" ::: "memory")
+#define wmb()		asm volatile("sfence" ::: "memory")
 #define rmb()		asm volatile("lfence" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC	"model name"
@@ -23,45 +27,61 @@
 
 #ifdef __powerpc__
 #include "../../arch/powerpc/include/uapi/asm/unistd.h"
+#define mb()		asm volatile ("sync" ::: "memory")
+#define wmb()		asm volatile ("sync" ::: "memory")
 #define rmb()		asm volatile ("sync" ::: "memory")
-#define cpu_relax()	asm volatile ("" ::: "memory");
 #define CPUINFO_PROC	"cpu"
 #endif
 
 #ifdef __s390__
+#define mb()		asm volatile("bcr 15,0" ::: "memory")
+#define wmb()		asm volatile("bcr 15,0" ::: "memory")
 #define rmb()		asm volatile("bcr 15,0" ::: "memory")
-#define cpu_relax()	asm volatile("" ::: "memory");
 #endif
 
 #ifdef __sh__
 #if defined(__SH4A__) || defined(__SH5__)
+# define mb()		asm volatile("synco" ::: "memory")
+# define wmb()		asm volatile("synco" ::: "memory")
 # define rmb()		asm volatile("synco" ::: "memory")
 #else
+# define mb()		asm volatile("" ::: "memory")
+# define wmb()		asm volatile("" ::: "memory")
 # define rmb()		asm volatile("" ::: "memory")
 #endif
-#define cpu_relax()	asm volatile("" ::: "memory")
 #define CPUINFO_PROC	"cpu type"
 #endif
 
 #ifdef __hppa__
+#define mb()		asm volatile("" ::: "memory")
+#define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define cpu_relax()	asm volatile("" ::: "memory");
 #define CPUINFO_PROC	"cpu"
 #endif
 
 #ifdef __sparc__
+#ifdef __LP64__
+#define mb()		asm volatile("ba,pt %%xcc, 1f\n"	\
+				     "membar #StoreLoad\n"	\
+				     "1:\n":::"memory")
+#else
+#define mb()		asm volatile("":::"memory")
+#endif
+#define wmb()		asm volatile("":::"memory")
 #define rmb()		asm volatile("":::"memory")
-#define cpu_relax()	asm volatile("":::"memory")
 #define CPUINFO_PROC	"cpu"
 #endif
 
 #ifdef __alpha__
+#define mb()		asm volatile("mb" ::: "memory")
+#define wmb()		asm volatile("wmb" ::: "memory")
 #define rmb()		asm volatile("mb" ::: "memory")
-#define cpu_relax()	asm volatile("" ::: "memory")
 #define CPUINFO_PROC	"cpu model"
 #endif
 
 #ifdef __ia64__
+#define mb()		asm volatile ("mf" ::: "memory")
+#define wmb()		asm volatile ("mf" ::: "memory")
 #define rmb()		asm volatile ("mf" ::: "memory")
 #define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
 #define CPUINFO_PROC	"model name"
@@ -72,40 +92,55 @@
  * Use the __kuser_memory_barrier helper in the CPU helper page. See
  * arch/arm/kernel/entry-armv.S in the kernel source for details.
  */
+#define mb()		((void(*)(void))0xffff0fa0)()
+#define wmb()		((void(*)(void))0xffff0fa0)()
 #define rmb()		((void(*)(void))0xffff0fa0)()
-#define cpu_relax()	asm volatile("":::"memory")
 #define CPUINFO_PROC	"Processor"
 #endif
 
 #ifdef __aarch64__
-#define rmb()		asm volatile("dmb ld" ::: "memory")
+#define mb()		asm volatile("dmb ish" ::: "memory")
+#define wmb()		asm volatile("dmb ishld" ::: "memory")
+#define rmb()		asm volatile("dmb ishst" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif
 
 #ifdef __mips__
-#define rmb()		asm volatile(					\
+#define mb()		asm volatile(					\
 				".set	mips2\n\t"			\
 				"sync\n\t"				\
 				".set	mips0"				\
 				: /* no output */			\
 				: /* no input */			\
 				: "memory")
-#define cpu_relax()	asm volatile("" ::: "memory")
+#define wmb()	mb()
+#define rmb()	mb()
 #define CPUINFO_PROC	"cpu model"
 #endif
 
 #ifdef __arc__
+#define mb()		asm volatile("" ::: "memory")
+#define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define cpu_relax()	rmb()
 #define CPUINFO_PROC	"Processor"
 #endif
 
 #ifdef __metag__
+#define mb()		asm volatile("" ::: "memory")
+#define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define cpu_relax()	asm volatile("" ::: "memory")
 #define CPUINFO_PROC	"CPU"
 #endif
 
+#define barrier() asm volatile ("" ::: "memory")
+
+#ifndef cpu_relax
+#define cpu_relax() barrier()
+#endif
+
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+
 #include <time.h>
 #include <unistd.h>
 #include <sys/types.h>
--- a/tools/perf/tests/rdpmc.c
+++ b/tools/perf/tests/rdpmc.c
@@ -9,8 +9,6 @@
 
 #if defined(__x86_64__) || defined(__i386__)
 
-#define barrier() asm volatile("" ::: "memory")
-
 static u64 rdpmc(unsigned int counter)
 {
 	unsigned int low, high;
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -177,7 +177,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, s
 static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->base;
-	int head = pc->data_head;
+	int head = ACCESS_ONCE(pc->data_head);
 	rmb();
 	return head;
 }
@@ -190,7 +190,7 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md,
 	/*
 	 * ensure all reads are done before we write the tail out.
 	 */
-	/* mb(); */
+	mb();
 	pc->data_tail = tail;
 }
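
Taken together, perf_mmap__read_head() and perf_mmap__write_tail() bracket
the consumer side of that protocol: rmb() at (C) keeps the record reads
after the head load, and mb() at (D) keeps them before the tail store that
returns space to the kernel. Below is a minimal consumer-loop sketch built
on the same primitives; it assumes the x86-64 barriers from this patch,
drain_mmap_ring() is a hypothetical helper (not part of the patch), and
records that wrap around the buffer end are not handled:

#include <stdint.h>
#include <linux/perf_event.h>

#define mb()		asm volatile("mfence" ::: "memory")	/* x86-64 */
#define rmb()		asm volatile("lfence" ::: "memory")
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

/*
 * Drain every complete record currently in the mmap'ed ring buffer.
 * 'data' is the ring itself (the pages after the control page);
 * 'mask' is the ring size minus one (ring sizes are powers of two).
 */
static void drain_mmap_ring(struct perf_event_mmap_page *pc,
			    unsigned char *data, uint64_t mask)
{
	uint64_t head = ACCESS_ONCE(pc->data_head);	/* READ ->data_head */
	uint64_t tail = pc->data_tail;	/* no barrier: only we store it */

	rmb();		/* (C) order the head load before the record reads */

	while (tail != head) {
		struct perf_event_header *ev =
			(struct perf_event_header *)(data + (tail & mask));

		/* ... consume the record at 'ev' ... */
		tail += ev->size;
	}

	mb();		/* (D) finish the record reads before publishing tail */
	pc->data_tail = tail;	/* WRITE ->data_tail: free the space */
}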