diff options
author:    Tejun Heo <tj@kernel.org>  2009-06-24 02:13:48 -0400
committer: Tejun Heo <tj@kernel.org>  2009-06-24 02:13:48 -0400
commit:    245b2e70eabd797932adb263a65da0bab3711753 (patch)
tree:      30f0b790dadd2b70bf06e534abcf66a76e97b05a /kernel
parent:    b9bf3121af348d9255f1c917830fe8c2df52efcb (diff)
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique. Update percpu
variable definitions accordingly.
* as,cfq: rename ioc_count uniquely
* cpufreq: rename cpu_dbs_info uniquely
* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it
* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
rename it
* ipv4,6: rename cookie_scratch uniquely
* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
pmc_irq_entry and nmi_entry to pmc_nmi_entry
* perf_counter: rename disable_count to perf_disable_count
* ftrace: rename test_event_disable to ftrace_test_event_disable
* kmemleak: rename test_pointer to kmemleak_test_pointer
* mce: rename next_interval to mce_next_interval
[ Impact: percpu usage cleanups, no duplicate static percpu var names ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c        | 6
-rw-r--r--  kernel/trace/trace_events.c  | 6
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea4..1fd7a2e75754 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
98 | 98 | ||
99 | void __weak perf_counter_print_debug(void) { } | 99 | void __weak perf_counter_print_debug(void) { } |
100 | 100 | ||
101 | static DEFINE_PER_CPU(int, disable_count); | 101 | static DEFINE_PER_CPU(int, perf_disable_count); |
102 | 102 | ||
103 | void __perf_disable(void) | 103 | void __perf_disable(void) |
104 | { | 104 | { |
105 | __get_cpu_var(disable_count)++; | 105 | __get_cpu_var(perf_disable_count)++; |
106 | } | 106 | } |
107 | 107 | ||
108 | bool __perf_enable(void) | 108 | bool __perf_enable(void) |
109 | { | 109 | { |
110 | return !--__get_cpu_var(disable_count); | 110 | return !--__get_cpu_var(perf_disable_count); |
111 | } | 111 | } |
112 | 112 | ||
113 | void perf_disable(void) | 113 | void perf_disable(void) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b6..54b1de5074b6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1318,7 +1318,7 @@ static __init void event_trace_self_tests(void)
1318 | 1318 | ||
1319 | #ifdef CONFIG_FUNCTION_TRACER | 1319 | #ifdef CONFIG_FUNCTION_TRACER |
1320 | 1320 | ||
1321 | static DEFINE_PER_CPU(atomic_t, test_event_disable); | 1321 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
1322 | 1322 | ||
1323 | static void | 1323 | static void |
1324 | function_test_events_call(unsigned long ip, unsigned long parent_ip) | 1324 | function_test_events_call(unsigned long ip, unsigned long parent_ip) |
@@ -1334,7 +1334,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1334 | pc = preempt_count(); | 1334 | pc = preempt_count(); |
1335 | resched = ftrace_preempt_disable(); | 1335 | resched = ftrace_preempt_disable(); |
1336 | cpu = raw_smp_processor_id(); | 1336 | cpu = raw_smp_processor_id(); |
1337 | disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu)); | 1337 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
1338 | 1338 | ||
1339 | if (disabled != 1) | 1339 | if (disabled != 1) |
1340 | goto out; | 1340 | goto out; |
@@ -1352,7 +1352,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1352 | trace_nowake_buffer_unlock_commit(event, flags, pc); | 1352 | trace_nowake_buffer_unlock_commit(event, flags, pc); |
1353 | 1353 | ||
1354 | out: | 1354 | out: |
1355 | atomic_dec(&per_cpu(test_event_disable, cpu)); | 1355 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
1356 | ftrace_preempt_enable(resched); | 1356 | ftrace_preempt_enable(resched); |
1357 | } | 1357 | } |
1358 | 1358 | ||