 arch/arm/kernel/perf_event.c |  6 ++++++
 arch/arm/oprofile/common.c   | 31 ++++++++++++++++++-------------
 arch/sh/kernel/perf_event.c  |  9 +++++++++
 include/linux/perf_event.h   |  1 +
 4 files changed, 34 insertions(+), 13 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..3b0aedfb96e7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+	return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED		0xFFFF
 
 #define C(_x) \
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index d660cb8dab36..1e971a7fcf82 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -43,7 +43,7 @@ static DEFINE_MUTEX(op_arm_mutex);
 
 static struct op_counter_config *counter_config;
 static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
+static int num_counters;
 
 /*
  * Overflow callback for oprofile.
@@ -54,11 +54,11 @@ static void op_overflow_handler(struct perf_event *event, int unused,
 	int id;
 	u32 cpu = smp_processor_id();
 
-	for (id = 0; id < perf_num_counters; ++id)
+	for (id = 0; id < num_counters; ++id)
 		if (perf_events[cpu][id] == event)
 			break;
 
-	if (id != perf_num_counters)
+	if (id != num_counters)
 		oprofile_add_sample(regs, id);
 	else
 		pr_warning("oprofile: ignoring spurious overflow "
@@ -76,7 +76,7 @@ static void op_perf_setup(void)
 	u32 size = sizeof(struct perf_event_attr);
 	struct perf_event_attr *attr;
 
-	for (i = 0; i < perf_num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		attr = &counter_config[i].attr;
 		memset(attr, 0, size);
 		attr->type = PERF_TYPE_RAW;
@@ -131,7 +131,7 @@ static int op_perf_start(void)
 	int cpu, event, ret = 0;
 
 	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
+		for (event = 0; event < num_counters; ++event) {
 			ret = op_create_counter(cpu, event);
 			if (ret)
 				goto out;
@@ -150,7 +150,7 @@ static void op_perf_stop(void)
 	int cpu, event;
 
 	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
+		for (event = 0; event < num_counters; ++event)
 			op_destroy_counter(cpu, event);
 }
 
@@ -179,7 +179,7 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
 {
 	unsigned int i;
 
-	for (i = 0; i < perf_num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		struct dentry *dir;
 		char buf[4];
 
@@ -353,14 +353,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
 	memset(&perf_events, 0, sizeof(perf_events));
 
-	perf_num_counters = armpmu_get_max_events();
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
 
-	counter_config = kcalloc(perf_num_counters,
+	counter_config = kcalloc(num_counters,
 			sizeof(struct op_counter_config), GFP_KERNEL);
 
 	if (!counter_config) {
 		pr_info("oprofile: failed to allocate %d "
-				"counters\n", perf_num_counters);
+				"counters\n", num_counters);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -370,11 +375,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 		goto out;
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
+		perf_events[cpu] = kcalloc(num_counters,
 				sizeof(struct perf_event *), GFP_KERNEL);
 		if (!perf_events[cpu]) {
 			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", perf_num_counters, cpu);
+					"for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -409,7 +414,7 @@ void __exit oprofile_arch_exit(void)
 	struct perf_event *event;
 
 	for_each_possible_cpu(cpu) {
-		for (id = 0; id < perf_num_counters; ++id) {
+		for (id = 0; id < num_counters; ++id) {
 			event = perf_events[cpu][id];
 			if (event)
 				perf_event_release_kernel(event);
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc3567258..2cb9ad59d4b1 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,15 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b682c1..1a0219247183 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -849,6 +849,7 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
+extern int perf_num_counters(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern void perf_event_task_tick(struct task_struct *task);
