Diffstat (limited to 'kernel/trace/trace_branch.c')
 kernel/trace/trace_branch.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 95e96842ed29..d594da0dc03c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -32,6 +32,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
 	struct ftrace_event_call *call = &event_branch;
 	struct trace_array *tr = branch_tracer;
+	struct trace_array_cpu *data;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
 	struct ring_buffer *buffer;
@@ -51,11 +52,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	if (atomic_inc_return(&data->disabled) != 1)
 		goto out;
 
 	pc = preempt_count();
-	buffer = tr->buffer;
+	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -80,7 +82,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	__buffer_unlock_commit(buffer, event);
 
  out:
-	atomic_dec(&tr->data[cpu]->disabled);
+	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
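What changed: struct trace_array no longer exposes a per-CPU data[] array or
a bare buffer pointer; both now sit inside tr->trace_buffer, and the per-CPU
slot is reached through per_cpu_ptr(). The probe also caches that per-CPU
pointer in a local variable so the out: path reuses it instead of indexing
again.

A minimal sketch of the same accessor pattern, assuming a simplified
standalone context; the names my_state, my_data, and my_probe are
illustrative only and do not appear in the diff above:

	#include <linux/percpu.h>
	#include <linux/atomic.h>
	#include <linux/irqflags.h>
	#include <linux/smp.h>

	struct my_state {
		atomic_t disabled;	/* recursion guard, as in the probe */
	};

	/* assumed to be allocated elsewhere with alloc_percpu(struct my_state) */
	static struct my_state __percpu *my_data;

	static void my_probe(void)
	{
		struct my_state *data;
		unsigned long flags;
		int cpu;

		local_irq_save(flags);
		cpu = raw_smp_processor_id();
		/* resolve the per-CPU slot once and cache the pointer */
		data = per_cpu_ptr(my_data, cpu);
		if (atomic_inc_return(&data->disabled) != 1)
			goto out;

		/* ... reserve and commit a ring-buffer event here ... */

	 out:
		atomic_dec(&data->disabled);	/* reuse the cached pointer */
		local_irq_restore(flags);
	}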