author     Steven Rostedt <srostedt@redhat.com>    2009-09-02 14:17:06 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2009-09-04 18:59:39 -0400
commit     e77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree       65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace_mmiotrace.c
parent     f633903af2ceb0cec07d45e499a072b6593d0ed1 (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers
on the fly. If an event is happening and has reserved data on one of
the buffers, and the latency tracer swaps the global buffer with the
max buffer, the result is that the event may commit the data to the
wrong buffer.
This patch changes the trace recording API to receive the buffer that
was used to reserve the event. That same buffer can then be passed in
to the commit.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
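As a minimal sketch of the calling convention this patch moves to: an event
writer snapshots tr->buffer once and uses that same ring buffer for both the
reservation and the commit. The writer function, the TRACE_EXAMPLE event type,
and the entry struct below are hypothetical; only trace_buffer_lock_reserve(),
ring_buffer_event_data(), and trace_buffer_unlock_commit() come from the patch.

static void __trace_example(struct trace_array *tr, u64 value)
{
	/*
	 * Snapshot the ring buffer once; a latency tracer may swap
	 * tr->buffer underneath us, but this local pointer keeps the
	 * reserve and the commit on the same buffer.
	 */
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct trace_example_entry *entry;	/* hypothetical entry type */
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_EXAMPLE,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* Commit to the buffer the event was reserved on, not tr->buffer. */
	trace_buffer_unlock_commit(buffer, event, 0, pc);
}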
Diffstat (limited to 'kernel/trace/trace_mmiotrace.c')
-rw-r--r--   kernel/trace/trace_mmiotrace.c | 10 +++++-----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index d53b45ed0806..c4c9bbda53d3 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,11 +307,12 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_rw *rw)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -319,7 +320,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->rw = *rw;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -333,11 +334,12 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_map *map)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -345,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->map = *map;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)