Diffstat (limited to 'kernel/trace/trace_mmiotrace.c')
 kernel/trace/trace_mmiotrace.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
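Every hunk below makes the same mechanical substitution: the ring buffer and the per-CPU trace data are no longer reached directly through struct trace_array but through an embedded struct trace_buffer (struct trace_iterator instead carries a pointer to one, and tracing_reset_online_cpus() now takes that pointer). A minimal sketch of the layout the new accessors assume, with unrelated members elided; the real definitions live in kernel/trace/trace.h and carry more fields:

/*
 * Sketch only -- the shape implied by this diff, not the full kernel
 * definitions.  The ring buffer and per-CPU data that used to live
 * directly in struct trace_array move into struct trace_buffer.
 */
struct trace_buffer {
	struct ring_buffer		*buffer;	/* was trace_array::buffer */
	struct trace_array_cpu __percpu	*data;		/* was trace_array::data */
};

struct trace_array {
	struct trace_buffer	trace_buffer;	/* embedded: tr->trace_buffer.buffer */
	/* ... */
};

struct trace_iterator {
	struct trace_buffer	*trace_buffer;	/* pointer: iter->trace_buffer->buffer */
	/* ... */
};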
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2472f6f76b50..a5e8f4878bfa 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -31,7 +31,7 @@ static void mmio_reset_data(struct trace_array *tr)
 	overrun_detected = false;
 	prev_overruns = 0;
 
-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
@@ -128,7 +128,7 @@ static void mmio_close(struct trace_iterator *iter)
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
 
 	if (over > prev_overruns)
 		cnt += over - prev_overruns;
@@ -309,7 +309,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct mmiotrace_rw *rw)
 {
 	struct ftrace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = per_cpu_ptr(tr->data, smp_processor_id());
+	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -339,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct mmiotrace_map *map)
 {
 	struct ftrace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 	struct trace_array_cpu *data;
 
 	preempt_disable();
-	data = per_cpu_ptr(tr->data, smp_processor_id());
+	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
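A note on the count_overruns() hunk above: it merges two sources of lost events. dropped_count counts events mmiotrace itself failed to record, and atomic_xchg() reads and clears it in one step; ring_buffer_overruns() reports a cumulative total, so the function keeps a snapshot in the file-static prev_overruns and adds only the new delta. A sketch of that pattern, filling in the snapshot update that falls outside the hunk's context lines (an assumption based on how the delta must be maintained; count_lost_events is a hypothetical name):

/* Sketch of the read-and-clear plus delta-against-snapshot pattern
 * used by count_overruns().  Not the verbatim kernel function. */
static atomic_t dropped_count;
static unsigned long prev_overruns;

static unsigned long count_lost_events(unsigned long total_overruns)
{
	/* atomic_xchg() returns the old value and zeroes the counter,
	 * so concurrent increments are never counted twice. */
	unsigned long cnt = atomic_xchg(&dropped_count, 0);

	if (total_overruns > prev_overruns)
		cnt += total_overruns - prev_overruns;
	prev_overruns = total_overruns;	/* assumed snapshot update */

	return cnt;
}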