Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 39ada66389cc..8388bc99f2ee 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
 		 * We need to consume the current entry to see
 		 * the next one.
 		 */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu,
+		ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 				    NULL, NULL);
-		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+		event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 				    NULL, NULL);
 	}
 