author    Robert Richter <robert.richter@amd.com>    2012-02-23 11:07:06 -0500
committer Robert Richter <robert.richter@amd.com>    2012-06-22 10:31:20 -0400
commit    f8bbfd7d28303967ca4e8597de9bdc9bf8b197e7 (patch)
tree      4adbc6e80a52d85b96509bee1778349c71e3c8a6 /drivers/oprofile
parent    e734568b675c985db2026848fefaac01c22977a5 (diff)
oprofile, perf: Use per-cpu framework
This changes oprofile_perf.c to use the per-cpu framework. Using the per-cpu framework should avoid errors like the following:

    arch/arm/oprofile/../../../drivers/oprofile/oprofile_perf.c:28:28: error: variably modified 'perf_events' at file scope

Reported-by: William Cohen <wcohen@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'drivers/oprofile')
-rw-r--r--  drivers/oprofile/oprofile_perf.c  23
1 file changed, 11 insertions(+), 12 deletions(-)
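For readers unfamiliar with the per-cpu API, a minimal sketch of the idiom the patch adopts follows (not part of the patch itself; lookup_event() is an illustrative helper). The old declaration sized a file-scope array by NR_CPUS, which the compiler in the reported configuration did not treat as a constant expression, hence the "variably modified" error quoted above; DEFINE_PER_CPU() avoids sizing an array at file scope altogether.

#include <linux/percpu.h>
#include <linux/perf_event.h>

/* Before: a file-scope array sized by NR_CPUS, rejected as
 * "variably modified" when NR_CPUS is not a constant expression:
 *
 *	static struct perf_event **perf_events[NR_CPUS];
 *
 * After: one pointer slot per CPU, placed in the per-cpu data area.
 */
static DEFINE_PER_CPU(struct perf_event **, perf_events);

/* Illustrative accessor: fetch the event registered for (cpu, id). */
static struct perf_event *lookup_event(int cpu, int id)
{
	return per_cpu(perf_events, cpu)[id];
}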
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index efc4b7f308cf..f3cfa0b9adfa 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -1,5 +1,6 @@
 /*
  * Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
  *
  * Perf-events backend for OProfile.
  */
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[NR_CPUS];
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
 static int num_counters;
 
 /*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
 	u32 cpu = smp_processor_id();
 
 	for (id = 0; id < num_counters; ++id)
-		if (perf_events[cpu][id] == event)
+		if (per_cpu(perf_events, cpu)[id] == event)
 			break;
 
 	if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
 {
 	struct perf_event *pevent;
 
-	if (!counter_config[event].enabled || perf_events[cpu][event])
+	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
 		return 0;
 
 	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
 		return -EBUSY;
 	}
 
-	perf_events[cpu][event] = pevent;
+	per_cpu(perf_events, cpu)[event] = pevent;
 
 	return 0;
 }
 
 static void op_destroy_counter(int cpu, int event)
 {
-	struct perf_event *pevent = perf_events[cpu][event];
+	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
 
 	if (pevent) {
 		perf_event_release_kernel(pevent);
-		perf_events[cpu][event] = NULL;
+		per_cpu(perf_events, cpu)[event] = NULL;
 	}
 }
 
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
 
 	for_each_possible_cpu(cpu) {
 		for (id = 0; id < num_counters; ++id) {
-			event = perf_events[cpu][id];
+			event = per_cpu(perf_events, cpu)[id];
 			if (event)
 				perf_event_release_kernel(event);
 		}
 
-		kfree(perf_events[cpu]);
+		kfree(per_cpu(perf_events, cpu));
 	}
 
 	kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
 	if (ret)
 		return ret;
 
-	memset(&perf_events, 0, sizeof(perf_events));
-
 	num_counters = perf_num_counters();
 	if (num_counters <= 0) {
 		pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
 	}
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(num_counters,
+		per_cpu(perf_events, cpu) = kcalloc(num_counters,
 			sizeof(struct perf_event *), GFP_KERNEL);
-		if (!perf_events[cpu]) {
+		if (!per_cpu(perf_events, cpu)) {
 			pr_info("oprofile: failed to allocate %d perf events "
 				"for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
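One subtlety worth noting: the per-cpu variable holds only a pointer, so the patch still kcalloc()s the per-counter array for each CPU in oprofile_perf_init() and kfree()s it in oprofile_perf_exit(), exactly as before. A condensed sketch of that pairing, building on the declarations in the sketch above (alloc_events()/free_events() are illustrative names, not from the patch; kcalloc()/kfree() come from <linux/slab.h>):

#include <linux/slab.h>

static int alloc_events(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* the per-cpu slot stores a pointer to a kcalloc'd array */
		per_cpu(perf_events, cpu) = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!per_cpu(perf_events, cpu))
			return -ENOMEM;	/* caller unwinds via the free path */
	}
	return 0;
}

static void free_events(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kfree(per_cpu(perf_events, cpu));	/* kfree(NULL) is a no-op */
}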