author    Frederic Weisbecker <fweisbec@gmail.com>  2010-03-22 19:08:59 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2010-04-01 02:26:30 -0400
commit    eb1e79611cc9bfe21978230e3521e77ea2d7874a (patch)
tree      0d5ebdb4a0b178fcd524d3b725da3e8a3cd6151e /kernel/trace
parent    b72c40949b0f04728f2993a1434598d3bad094ea (diff)
perf: Correctly align perf event tracing buffer
The trace event buffer used by perf to record raw sample events
is typed as an array of char, so alloc_percpu() may not align it
to 8 bytes.

But it needs to be 8-byte aligned on sparc64, because we cast this
buffer into an arbitrary structure type built by the TRACE_EVENT()
macro to store the traces. If a 64-bit field of that structure is
then accessed, it may not have the alignment the hardware expects.
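To illustrate the failure mode, here is a minimal hypothetical sketch
(not code from this commit; struct sample_entry and record_sample()
are made up for the example):

#include <stdint.h>

/* Hypothetical stand-in for a record layout built by TRACE_EVENT() */
struct sample_entry {
	uint32_t	type;
	uint32_t	flags;
	uint64_t	timestamp;	/* 8-byte field at offset 8 */
};

void record_sample(char *raw_buf, uint64_t now)
{
	/*
	 * If raw_buf is only char-aligned, the cast below can place
	 * 'timestamp' at an address that is not a multiple of 8,
	 * which traps on strict-alignment hardware such as sparc64.
	 */
	struct sample_entry *entry = (struct sample_entry *)raw_buf;

	entry->timestamp = now;
}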
Use an array of long instead to force the appropriate alignment, and
perform a compile-time check to ensure that the size in bytes of the
buffer is a multiple of sizeof(long), so that its actual size doesn't
get shrunk under us.
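The same technique in a standalone form (a sketch assuming a made-up
BUF_SIZE; _Static_assert plays the role of the kernel's BUILD_BUG_ON):

#define BUF_SIZE 2048	/* hypothetical stand-in for PERF_MAX_TRACE_SIZE */

/*
 * An array of unsigned long carries the alignment of unsigned long
 * (8 bytes on 64-bit targets), unlike an array of char. The integer
 * division would silently round the size down if BUF_SIZE were not
 * a multiple of sizeof(unsigned long), hence the compile-time check.
 */
typedef unsigned long buf_t[BUF_SIZE / sizeof(unsigned long)];

_Static_assert(BUF_SIZE % sizeof(unsigned long) == 0,
	       "BUF_SIZE must be a multiple of sizeof(unsigned long)");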
This fixes unaligned accesses reported while using perf lock
on sparc64.
Suggested-by: David Miller <davem@davemloft.net>
Suggested-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Miller <davem@davemloft.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_event_perf.c | 11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * surprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
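For reference, the alignment difference the patch relies on can be
checked with a small standalone program (hypothetical, not part of
the commit):

#include <stdalign.h>
#include <stdio.h>

int main(void)
{
	/* A char array only guarantees byte alignment... */
	printf("alignof(char [64])         = %zu\n", alignof(char [64]));
	/*
	 * ...while a long array inherits its element's alignment,
	 * typically 8 bytes on 64-bit platforms such as sparc64.
	 */
	printf("alignof(unsigned long [8]) = %zu\n", alignof(unsigned long [8]));
	return 0;
}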