author     Rusty Russell <rusty@rustcorp.com.au>   2009-10-29 09:34:15 -0400
committer  Tejun Heo <tj@kernel.org>               2009-10-29 09:34:15 -0400
commit     dd17c8f72993f9461e9c19250e3f155d6d99df22
tree       c33eedf0cf2862e9feeb796e94d49a2ccdce0149 /kernel/trace
parent     390dfd95c5df1ab3921dd388d11b2aee332c3f2c
percpu: remove per_cpu__ prefix.
Now that the return from alloc_percpu is compatible with the address
of per-cpu vars, it makes sense to hand around the address of per-cpu
variables. To make this sane, we remove the per_cpu__ prefix we
created to stop people accidentally using these vars directly.
Now that we have sparse, we can use that instead (next patch).
tj: * Updated to convert stuff that was missed by or added after the
      original patch.
* Kill per_cpu_var() macro.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
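
[Editor's note: a minimal sketch of what the conversion looks like from a
caller's side. The per-cpu variable is the one touched by this diff; the
helper function is invented for illustration and assumes a post-patch tree.]

    #include <linux/percpu.h>

    DEFINE_PER_CPU(int, ftrace_cpu_disabled);

    /*
     * Before this patch, accessors had to reach the mangled per_cpu__
     * symbol name through the per_cpu_var() wrapper:
     *
     *	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
     *
     * After it (per_cpu_var() is killed), the variable name is handed
     * to the accessor directly:
     */
    static void disable_ftrace_on_this_cpu(void)	/* hypothetical helper */
    {
    	preempt_disable();
    	__this_cpu_inc(ftrace_cpu_disabled);
    }
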
Diffstat (limited to 'kernel/trace')
 kernel/trace/trace.c                 | 6 +++---
 kernel/trace/trace_functions_graph.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 85a5ed70b5b2..b808177af816 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -91,12 +91,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 90a6daa10962..8614e3241ff8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -176,7 +176,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +240,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,