author     Rusty Russell <rusty@rustcorp.com.au>   2009-10-29 09:34:15 -0400
committer  Tejun Heo <tj@kernel.org>               2009-10-29 09:34:15 -0400
commit     dd17c8f72993f9461e9c19250e3f155d6d99df22 (patch)
tree       c33eedf0cf2862e9feeb796e94d49a2ccdce0149 /kernel
parent     390dfd95c5df1ab3921dd388d11b2aee332c3f2c (diff)
percpu: remove per_cpu__ prefix.
Now that the return from alloc_percpu is compatible with the address
of per-cpu vars, it makes sense to hand around the address of per-cpu
variables. To make this sane, we remove the per_cpu__ prefix we
created to stop people accidentally using these vars directly.
Now that we have sparse, we can use that instead (next patch).
tj: * Updated to convert code that was missed by, or added after, the
      original patch.
* Kill per_cpu_var() macro.
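For readers who never met the old scheme, here is a minimal user-space
sketch of why per_cpu_var() existed at all. This is a deliberate
simplification (the kernel's real DEFINE_PER_CPU also places the
variable in a per-cpu data section and does far more bookkeeping): the
old macros pasted a per_cpu__ prefix onto the variable name, so any
code that needed to name the variable had to reconstruct the mangled
symbol through per_cpu_var().

#include <stdio.h>

/* Old scheme (simplified): the definition macro mangles the name... */
#define OLD_DEFINE_PER_CPU(type, name)  type per_cpu__##name
/* ...so users must recover the mangled symbol via per_cpu_var(). */
#define per_cpu_var(name)               per_cpu__##name

OLD_DEFINE_PER_CPU(int, ftrace_cpu_disabled);

int main(void)
{
	/* Writing "ftrace_cpu_disabled" alone would not compile here:
	 * the real symbol is per_cpu__ftrace_cpu_disabled.  After this
	 * patch the prefix is gone and the plain name works directly. */
	per_cpu_var(ftrace_cpu_disabled)++;
	printf("%d\n", per_cpu_var(ftrace_cpu_disabled));
	return 0;
}

With the prefix removed, the accident-prevention job moves to the
sparse address-space annotations in the follow-up patch mentioned
above.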
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutorture.c                   | 8
-rw-r--r--  kernel/trace/trace.c                  | 6
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 4
3 files changed, 9 insertions, 9 deletions
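Every hunk below follows the same pattern: accessors such as
__this_cpu_inc() and __this_cpu_read(), which previously had to wrap
their argument in per_cpu_var(), now take the plain variable name. As
a rough mental model only (real per-cpu storage uses per-CPU memory
offsets, not a 2-D array, and this_cpu_inc_count() is a made-up helper
for illustration), __this_cpu_inc(rcu_torture_count[pipe_count])
increments the current CPU's private copy of one pipeline counter:

#include <stdio.h>

#define NR_CPUS                 4
#define RCU_TORTURE_PIPE_LEN    10

/* Model: one private copy of the counter array per CPU. */
static long rcu_torture_count[NR_CPUS][RCU_TORTURE_PIPE_LEN + 1];

static int smp_processor_id(void) { return 0; }  /* stand-in */

/* Rough model of __this_cpu_inc(rcu_torture_count[pipe_count]). */
static void this_cpu_inc_count(int pipe_count)
{
	rcu_torture_count[smp_processor_id()][pipe_count]++;
}

int main(void)
{
	this_cpu_inc_count(3);
	printf("count[cpu0][3] = %ld\n", rcu_torture_count[0][3]);
	return 0;
}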
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 178967b6434e..e339ab349121 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -731,13 +731,13 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 }
@@ -786,13 +786,13 @@ rcu_torture_reader(void *arg)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 	schedule();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 85a5ed70b5b2..b808177af816 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -91,12 +91,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 90a6daa10962..8614e3241ff8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -176,7 +176,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +240,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,