Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..c82dfd92fdfd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
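
The pattern above replaces a per-cpu local_t counter, updated through local_inc()/local_dec() on the address returned by __get_cpu_var(), with a plain per-cpu int updated through the __this_cpu_*() accessors, which fold the per-cpu address calculation into the operation itself (a single instruction on x86). A minimal sketch of the before/after idiom follows; the counter names are hypothetical, and the per_cpu_var() wrapper matches the kernel version this diff targets (later kernels take the bare variable name):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

/* Before: a local_t, modified via local_* ops on its per-cpu address. */
DEFINE_PER_CPU(local_t, demo_disabled);			/* hypothetical */

static inline void demo_disable_old(void)
{
	preempt_disable();				/* pin to this CPU */
	local_inc(&__get_cpu_var(demo_disabled));	/* atomic wrt interrupts on this CPU */
}

/* After: a plain int; __this_cpu_inc() locates and increments the
 * current CPU's slot in one step, so no address-of is needed.
 */
DEFINE_PER_CPU(int, demo_count);			/* hypothetical */

static inline void demo_disable_new(void)
{
	preempt_disable();				/* caller must prevent migration */
	__this_cpu_inc(per_cpu_var(demo_count));
}

Note the trade-off: the double-underscore __this_cpu_*() variants are not guaranteed to be IRQ-atomic the way local_inc() is, so a conversion like this assumes the counter is never updated from interrupt context racing with these paths.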