author		Peter Zijlstra <peterz@infradead.org>	2013-10-30 06:42:46 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-11-06 06:34:26 -0500
commit		a94d342b9cb09edfe888ea972af0883b6a8d992b (patch)
tree		dd3af73c84e210548c79d32dafbe4fadb2a42eb9 /tools/perf
parent		0a196848ca365ec582c6d86659be456be6d4ed96 (diff)
tools/perf: Add required memory barriers
To match patch bf378d341e48 ("perf: Fix perf ring buffer memory ordering")
change userspace to also adhere to the ordering outlined.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Neuling <mikey@neuling.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: james.hogan@imgtec.com
Cc: Vince Weaver <vince@deater.net>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Michael Ellerman <michael@ellerman.id.au>
Link: http://lkml.kernel.org/r/20131030104246.GH16117@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
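[Editorial note: for readers following along, bf378d341e48 documents the pairing between the kernel-side writer and the user-space reader of the perf ring buffer. Below is a minimal reader-side sketch of that contract. It is illustrative code, not part of this patch; __sync_synchronize() (a GCC builtin full barrier) stands in for the per-arch mb()/rmb() this patch adds to perf.h, and struct ring_ctrl is a stand-in for the relevant perf_event_mmap_page fields.]

	#include <stdint.h>

	/* Stand-ins for the per-arch barriers this patch defines in perf.h. */
	#define mb()  __sync_synchronize()
	#define rmb() __sync_synchronize()

	/* Illustrative stand-in for the perf_event_mmap_page control fields. */
	struct ring_ctrl {
		volatile uint64_t data_head;	/* advanced by the kernel writer */
		volatile uint64_t data_tail;	/* advanced by the user-space reader */
	};

	static void consume(struct ring_ctrl *rc)
	{
		uint64_t head = rc->data_head;
		rmb();			/* pairs with the kernel barrier issued after writing records */

		/* ... read all records in [data_tail, head) ... */

		mb();			/* all record reads complete before the tail is published */
		rc->data_tail = head;	/* tells the kernel this space may be reused */
	}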
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/perf.h	| 59
-rw-r--r--	tools/perf/tests/rdpmc.c	|  2
-rw-r--r--	tools/perf/util/evlist.h	|  4
3 files changed, 49 insertions(+), 16 deletions(-)
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index f61c230beec4..6a587e84fdfe 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -4,6 +4,8 @@
 #include <asm/unistd.h>
 
 #if defined(__i386__)
+#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC "model name"
@@ -13,6 +15,8 @@
 #endif
 
 #if defined(__x86_64__)
+#define mb() asm volatile("mfence" ::: "memory")
+#define wmb() asm volatile("sfence" ::: "memory")
 #define rmb() asm volatile("lfence" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC "model name"
@@ -23,45 +27,61 @@
 
 #ifdef __powerpc__
 #include "../../arch/powerpc/include/uapi/asm/unistd.h"
+#define mb() asm volatile ("sync" ::: "memory")
+#define wmb() asm volatile ("sync" ::: "memory")
 #define rmb() asm volatile ("sync" ::: "memory")
-#define cpu_relax() asm volatile ("" ::: "memory");
 #define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __s390__
+#define mb() asm volatile("bcr 15,0" ::: "memory")
+#define wmb() asm volatile("bcr 15,0" ::: "memory")
 #define rmb() asm volatile("bcr 15,0" ::: "memory")
-#define cpu_relax() asm volatile("" ::: "memory");
 #endif
 
 #ifdef __sh__
 #if defined(__SH4A__) || defined(__SH5__)
+# define mb() asm volatile("synco" ::: "memory")
+# define wmb() asm volatile("synco" ::: "memory")
 # define rmb() asm volatile("synco" ::: "memory")
 #else
+# define mb() asm volatile("" ::: "memory")
+# define wmb() asm volatile("" ::: "memory")
 # define rmb() asm volatile("" ::: "memory")
 #endif
-#define cpu_relax() asm volatile("" ::: "memory")
 #define CPUINFO_PROC "cpu type"
 #endif
 
 #ifdef __hppa__
+#define mb() asm volatile("" ::: "memory")
+#define wmb() asm volatile("" ::: "memory")
 #define rmb() asm volatile("" ::: "memory")
-#define cpu_relax() asm volatile("" ::: "memory");
 #define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __sparc__
+#ifdef __LP64__
+#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
+		"membar #StoreLoad\n" \
+		"1:\n":::"memory")
+#else
+#define mb() asm volatile("":::"memory")
+#endif
+#define wmb() asm volatile("":::"memory")
 #define rmb() asm volatile("":::"memory")
-#define cpu_relax() asm volatile("":::"memory")
 #define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __alpha__
+#define mb() asm volatile("mb" ::: "memory")
+#define wmb() asm volatile("wmb" ::: "memory")
 #define rmb() asm volatile("mb" ::: "memory")
-#define cpu_relax() asm volatile("" ::: "memory")
 #define CPUINFO_PROC "cpu model"
 #endif
 
 #ifdef __ia64__
+#define mb() asm volatile ("mf" ::: "memory")
+#define wmb() asm volatile ("mf" ::: "memory")
 #define rmb() asm volatile ("mf" ::: "memory")
 #define cpu_relax() asm volatile ("hint @pause" ::: "memory")
 #define CPUINFO_PROC "model name"
@@ -72,40 +92,55 @@
  * Use the __kuser_memory_barrier helper in the CPU helper page. See
  * arch/arm/kernel/entry-armv.S in the kernel source for details.
  */
+#define mb() ((void(*)(void))0xffff0fa0)()
+#define wmb() ((void(*)(void))0xffff0fa0)()
 #define rmb() ((void(*)(void))0xffff0fa0)()
-#define cpu_relax() asm volatile("":::"memory")
 #define CPUINFO_PROC "Processor"
 #endif
 
 #ifdef __aarch64__
-#define rmb() asm volatile("dmb ld" ::: "memory")
+#define mb() asm volatile("dmb ish" ::: "memory")
+#define wmb() asm volatile("dmb ishst" ::: "memory")
+#define rmb() asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax() asm volatile("yield" ::: "memory")
 #endif
 
 #ifdef __mips__
-#define rmb() asm volatile( \
+#define mb() asm volatile( \
 		".set mips2\n\t" \
 		"sync\n\t" \
 		".set mips0" \
 		: /* no output */ \
 		: /* no input */ \
 		: "memory")
-#define cpu_relax() asm volatile("" ::: "memory")
+#define wmb() mb()
+#define rmb() mb()
 #define CPUINFO_PROC "cpu model"
 #endif
 
 #ifdef __arc__
+#define mb() asm volatile("" ::: "memory")
+#define wmb() asm volatile("" ::: "memory")
 #define rmb() asm volatile("" ::: "memory")
-#define cpu_relax() rmb()
 #define CPUINFO_PROC "Processor"
 #endif
 
 #ifdef __metag__
+#define mb() asm volatile("" ::: "memory")
+#define wmb() asm volatile("" ::: "memory")
 #define rmb() asm volatile("" ::: "memory")
-#define cpu_relax() asm volatile("" ::: "memory")
 #define CPUINFO_PROC "CPU"
 #endif
 
+#define barrier() asm volatile ("" ::: "memory")
+
+#ifndef cpu_relax
+#define cpu_relax() barrier()
+#endif
+
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+
 #include <time.h>
 #include <unistd.h>
 #include <sys/types.h>
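[Editorial note: the ACCESS_ONCE() added above forces the compiler to emit exactly one load of a shared location and never reuse a cached register copy. A short illustration of the intended use follows; the polling helper is hypothetical, not from this patch, and cpu_relax() here is the generic barrier() fallback the hunk above introduces.]

	#include <stdint.h>

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
	#define cpu_relax() asm volatile("" ::: "memory")	/* generic fallback, as above */

	/* Without ACCESS_ONCE the compiler could hoist the load out of the
	 * loop and spin forever on a stale register copy of *head. */
	static void wait_for_new_data(uint64_t *head, uint64_t tail)
	{
		while (ACCESS_ONCE(*head) == tail)
			cpu_relax();
	}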
diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c
index ff94886aad99..46649c25fa5e 100644
--- a/tools/perf/tests/rdpmc.c
+++ b/tools/perf/tests/rdpmc.c
@@ -9,8 +9,6 @@
 
 #if defined(__x86_64__) || defined(__i386__)
 
-#define barrier() asm volatile("" ::: "memory")
-
 static u64 rdpmc(unsigned int counter)
 {
 	unsigned int low, high;
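[Editorial note: for context on why rdpmc.c needs barrier() at all, the test reads a counter with a seqlock-style retry loop against the mmap'ed control page. A condensed sketch of that pattern follows; the field names come from struct perf_event_mmap_page, but the loop body is paraphrased, not the file verbatim.]

	#include <linux/perf_event.h>	/* struct perf_event_mmap_page */

	#define barrier() asm volatile("" ::: "memory")

	static unsigned long long rdpmc(unsigned int counter)
	{
		unsigned int low, high;

		asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
		return low | ((unsigned long long)high) << 32;
	}

	/* Retry whenever the kernel bumped ->lock while we were sampling. */
	static unsigned long long mmap_read_self(struct perf_event_mmap_page *pc)
	{
		unsigned int seq;
		unsigned long long count;

		do {
			seq = pc->lock;
			barrier();		/* read ->lock before the payload */
			count = pc->offset;
			if (pc->index)
				count += rdpmc(pc->index - 1);
			barrier();		/* payload reads before the re-check */
		} while (pc->lock != seq);

		return count;
	}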
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e99eaed92682..ecaa582f40e2 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -177,7 +177,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, s
 static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->base;
-	int head = pc->data_head;
+	int head = ACCESS_ONCE(pc->data_head);
 	rmb();
 	return head;
 }
@@ -190,7 +190,7 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md,
 	/*
 	 * ensure all reads are done before we write the tail out.
 	 */
-	/* mb(); */
+	mb();
 	pc->data_tail = tail;
 }
 
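[Editorial note: taken together, the two accessors bracket the reader's critical section: perf_mmap__read_head() is acquire-like, perf_mmap__write_tail() is release-like. A rough consumer sketch under those semantics follows; wrap-around handling and perf's real record parsing are omitted, and md->mask, md->prev, and the page_size data offset are assumptions about struct perf_mmap's layout at the time.]

	static void drain_mmap(struct perf_mmap *md, unsigned int page_size)
	{
		unsigned int head = perf_mmap__read_head(md);	/* rmb() inside */
		unsigned int tail = md->prev;
		unsigned char *data = (unsigned char *)md->base + page_size;

		while (tail != head) {
			struct perf_event_header *hdr =
				(struct perf_event_header *)&data[tail & md->mask];

			/* ... hand one record to the tool ... */
			tail += hdr->size;
		}

		md->prev = tail;
		perf_mmap__write_tail(md, tail);		/* mb() inside */
	}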