author     Matt Fleming <matt@console-pimps.org>        2010-09-27 15:22:24 -0400
committer  Robert Richter <robert.richter@amd.com>      2010-10-11 04:38:13 -0400
commit     3bf101ba42a1c89b5afbc7492e7647dae5e18735 (patch)
tree       2501d57e827b283da1a0240630fe5988d155f07b /arch/arm/oprofile
parent     4cbe75be5c6ae86bdc7daec864eeb2dfd66f48bb (diff)
perf: Add helper function to return number of counters
The number of counters for the registered pmu is needed in a few places,
so provide a helper function that returns this number.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/arm/oprofile')
-rw-r--r--   arch/arm/oprofile/common.c   31
1 file changed, 18 insertions, 13 deletions
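The perf_num_counters() helper itself is not part of the diffstat shown here, which is limited to arch/arm/oprofile. The snippet below is only a sketch of its likely shape, inferred from the armpmu_get_max_events() call that the diff removes: a weak default in the perf core reporting zero counters, overridden by the architecture code.

/* Sketch only; these definitions are not taken from the arch/arm/oprofile diff below. */

/* Generic fallback in the perf core: no PMU registered, no counters. */
int __weak perf_num_counters(void)
{
	return 0;
}

/*
 * Architecture override (ARM), assumed to simply forward to
 * armpmu_get_max_events(), which the oprofile code below used to
 * call directly.
 */
int perf_num_counters(void)
{
	return armpmu_get_max_events();
}

With the count exposed through a generic symbol, the oprofile glue no longer depends on the ARM-specific armpmu_get_max_events(), and it can fail with -ENODEV when no counters are reported.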
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index d660cb8dab36..1e971a7fcf82 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -43,7 +43,7 @@ static DEFINE_MUTEX(op_arm_mutex);
 
 static struct op_counter_config *counter_config;
 static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
+static int num_counters;
 
 /*
  * Overflow callback for oprofile.
@@ -54,11 +54,11 @@ static void op_overflow_handler(struct perf_event *event, int unused,
 	int id;
 	u32 cpu = smp_processor_id();
 
-	for (id = 0; id < perf_num_counters; ++id)
+	for (id = 0; id < num_counters; ++id)
 		if (perf_events[cpu][id] == event)
 			break;
 
-	if (id != perf_num_counters)
+	if (id != num_counters)
 		oprofile_add_sample(regs, id);
 	else
 		pr_warning("oprofile: ignoring spurious overflow "
@@ -76,7 +76,7 @@ static void op_perf_setup(void)
 	u32 size = sizeof(struct perf_event_attr);
 	struct perf_event_attr *attr;
 
-	for (i = 0; i < perf_num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		attr = &counter_config[i].attr;
 		memset(attr, 0, size);
 		attr->type = PERF_TYPE_RAW;
@@ -131,7 +131,7 @@ static int op_perf_start(void)
 	int cpu, event, ret = 0;
 
 	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
+		for (event = 0; event < num_counters; ++event) {
 			ret = op_create_counter(cpu, event);
 			if (ret)
 				goto out;
@@ -150,7 +150,7 @@ static void op_perf_stop(void)
 	int cpu, event;
 
 	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
+		for (event = 0; event < num_counters; ++event)
 			op_destroy_counter(cpu, event);
 }
 
@@ -179,7 +179,7 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
 {
 	unsigned int i;
 
-	for (i = 0; i < perf_num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		struct dentry *dir;
 		char buf[4];
 
@@ -353,14 +353,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
 	memset(&perf_events, 0, sizeof(perf_events));
 
-	perf_num_counters = armpmu_get_max_events();
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
 
-	counter_config = kcalloc(perf_num_counters,
+	counter_config = kcalloc(num_counters,
 			sizeof(struct op_counter_config), GFP_KERNEL);
 
 	if (!counter_config) {
 		pr_info("oprofile: failed to allocate %d "
-				"counters\n", perf_num_counters);
+				"counters\n", num_counters);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -370,11 +375,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 		goto out;
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
+		perf_events[cpu] = kcalloc(num_counters,
 				sizeof(struct perf_event *), GFP_KERNEL);
 		if (!perf_events[cpu]) {
 			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", perf_num_counters, cpu);
+					"for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -409,7 +414,7 @@ void __exit oprofile_arch_exit(void)
 	struct perf_event *event;
 
 	for_each_possible_cpu(cpu) {
-		for (id = 0; id < perf_num_counters; ++id) {
+		for (id = 0; id < num_counters; ++id) {
 			event = perf_events[cpu][id];
 			if (event)
 				perf_event_release_kernel(event);