author     Will Deacon <will.deacon@arm.com>  2012-07-29 07:36:28 -0400
committer  Will Deacon <will.deacon@arm.com>  2012-08-23 06:35:52 -0400
commit     6dbc00297095122ea89e016ce6affad0b7c0ddac
tree       f3c01a92818dd1a8e9cf9cd9a52cae8229332c55
parent     04236f9fe07462849215c67cae6147661368bfad
ARM: perf: prepare for moving CPU PMU code into separate file
The CPU PMU code is tightly coupled with generic ARM PMU handling code. This makes it cumbersome when trying to add support for other ARM PMUs (e.g. interconnect, L2 cache controller, bus) as the generic parts of the code are not readily reusable.

This patch cleans up perf_event.c so that reusable code is exposed via header files to other potential PMU drivers. The CPU code is consistently named to identify it as such and also to prepare for moving it into a separate file.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c | 69
1 file changed, 27 insertions(+), 42 deletions(-)
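The commit message above talks about exposing the reusable parts through header files. As a rough sketch of the interface that becomes shareable with this patch, the prototypes below are copied from the signatures visible in the diff; the header they would be declared in (presumably <asm/pmu.h>) is an assumption rather than something shown here:

#include <linux/perf_event.h>	/* PERF_COUNT_HW_MAX, struct perf_event */
#include <linux/pm.h>		/* struct dev_pm_ops, SET_RUNTIME_PM_OPS */

struct arm_pmu;			/* driver-private PMU descriptor */

/* Runtime PM callbacks shared by ARM PMU drivers (made non-static below). */
extern const struct dev_pm_ops armpmu_dev_pm_ops;

/* Registration entry point, visible in the armpmu_register() hunk context. */
int armpmu_register(struct arm_pmu *armpmu, char *name, int type);

/* Generic event-mapping helper (previously the static map_cpu_event()). */
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)
				    [PERF_COUNT_HW_CACHE_MAX]
				    [PERF_COUNT_HW_CACHE_OP_MAX]
				    [PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);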
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 7c29914bdcc6..9e3afd1994d9 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -29,26 +29,17 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-/*
- * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
- * another platform that supports more, we need to increase this to be the
- * largest of all platforms.
- *
- * ARMv7 supports up to 32 events:
- *  cycle counter CCNT + 31 events counters CNT0..30.
- *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
- */
-#define ARMPMU_MAX_HWEVENTS		32
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *cpu_pmu;
 
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
-#define to_arm_pmu(p)			(container_of(p, struct arm_pmu, pmu))
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
 const char *perf_pmu_name(void)
 {
 	if (!cpu_pmu)
@@ -69,13 +60,6 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-#define HW_OP_UNSUPPORTED		0xFFFF
-
-#define C(_x) \
-	PERF_COUNT_HW_CACHE_##_x
-
-#define CACHE_OP_UNSUPPORTED		0xFFFF
-
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -106,7 +90,7 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 }
 
 static int
-armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
 	int mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
@@ -118,19 +102,20 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 	return (int)(config & raw_event_mask);
 }
 
-static int map_cpu_event(struct perf_event *event,
-			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-			 const unsigned (*cache_map)
-					[PERF_COUNT_HW_CACHE_MAX]
-					[PERF_COUNT_HW_CACHE_OP_MAX]
-					[PERF_COUNT_HW_CACHE_RESULT_MAX],
-			 u32 raw_event_mask)
+int
+armpmu_map_event(struct perf_event *event,
+		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		 const unsigned (*cache_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		 u32 raw_event_mask)
 {
 	u64 config = event->attr.config;
 
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
-		return armpmu_map_event(event_map, config);
+		return armpmu_map_hw_event(event_map, config);
 	case PERF_TYPE_HW_CACHE:
 		return armpmu_map_cache_event(cache_map, config);
 	case PERF_TYPE_RAW:
@@ -594,6 +579,10 @@ static int armpmu_runtime_suspend(struct device *dev)
 }
 #endif
 
+const struct dev_pm_ops armpmu_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
+};
+
 static void __init armpmu_init(struct arm_pmu *armpmu)
 {
 	atomic_set(&armpmu->active_events, 0);
@@ -624,7 +613,7 @@ int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
624#include "perf_event_v6.c" 613#include "perf_event_v6.c"
625#include "perf_event_v7.c" 614#include "perf_event_v7.c"
626 615
627static struct pmu_hw_events *armpmu_get_cpu_events(void) 616static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
628{ 617{
629 return &__get_cpu_var(cpu_hw_events); 618 return &__get_cpu_var(cpu_hw_events);
630} 619}
@@ -638,7 +627,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
-	cpu_pmu->get_hw_events = armpmu_get_cpu_events;
+	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
@@ -651,8 +640,8 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
+static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 		return NOTIFY_DONE;
@@ -663,12 +652,8 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
-	.notifier_call = pmu_cpu_notify,
-};
-
-static const struct dev_pm_ops armpmu_dev_pm_ops = {
-	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
+static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+	.notifier_call = cpu_pmu_notify,
 };
 
 /*
@@ -771,7 +756,7 @@ static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
 
 	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
-	register_cpu_notifier(&pmu_cpu_notifier);
+	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
 	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
 
 	return 0;
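To show how the exported armpmu_map_event() is intended to be reused by the non-CPU PMUs mentioned in the commit message (interconnect, L2 cache controller, bus), here is a minimal hypothetical sketch of a driver-side map_event callback with its own tables. All l2x0_* identifiers and the 0xFF raw event mask are invented for illustration, and it assumes HW_OP_UNSUPPORTED/CACHE_OP_UNSUPPORTED and the struct arm_pmu map_event hook are reachable through the shared header:

/* Hypothetical per-driver tables; everything defaults to "unsupported",
 * which is the safe starting point before real event codes are filled in. */
static const unsigned l2x0_perf_map[PERF_COUNT_HW_MAX] = {
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED,
};

static const unsigned l2x0_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					 [PERF_COUNT_HW_CACHE_OP_MAX]
					 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[0 ... PERF_COUNT_HW_CACHE_MAX - 1] = {
		[0 ... PERF_COUNT_HW_CACHE_OP_MAX - 1] = {
			[0 ... PERF_COUNT_HW_CACHE_RESULT_MAX - 1] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/* The driver's map_event callback just hands its tables to the shared helper,
 * mirroring what the CPU PMU backends do with their own tables. */
static int l2x0_pmu_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &l2x0_perf_map,
				&l2x0_perf_cache_map, 0xFF);
}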