 arch/arm/include/asm/perf_event.h   |  9
 arch/arm/include/asm/pmu.h          | 11
 arch/arm/kernel/perf_event.c        | 69
 arch/arm/kernel/perf_event_v6.c     |  4
 arch/arm/kernel/perf_event_v7.c     | 10
 arch/arm/kernel/perf_event_xscale.c |  2
 6 files changed, 53 insertions(+), 52 deletions(-)
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index e074948d8143..625cd621a436 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,13 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* Nothing to see here... */
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		32
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+#define C(_x)				PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED		0xFFFF
 
 #endif /* __ARM_PERF_EVENT_H__ */
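
Note: C(_x) is shorthand used when backends build their cache maps, so that
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] expands to the generic perf enum
indices. A minimal sketch of such a table (the event numbers are invented for
illustration; a real table must set every unsupported slot to
CACHE_OP_UNSUPPORTED rather than leaving it zero):

	#include <linux/perf_event.h>
	#include <asm/perf_event.h>

	static const unsigned example_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						    [PERF_COUNT_HW_CACHE_OP_MAX]
						    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
		[C(L1D)] = {
			[C(OP_READ)] = {
				[C(RESULT_ACCESS)]	= 0x04,	/* invented event number */
				[C(RESULT_MISS)]	= 0x03,	/* invented event number */
			},
		},
		/* Remaining slots elided here; real tables fill them all
		 * with CACHE_OP_UNSUPPORTED. */
	};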
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index fbec73a0ee76..a993ad676047 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -89,7 +89,9 @@ struct arm_pmu {
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
@@ -99,6 +101,13 @@ int armpmu_event_set_period(struct perf_event *event,
 			     struct hw_perf_event *hwc,
 			     int idx);
 
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
 #endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* __ARM_PMU_H__ */
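
With armpmu_map_event() exported here, each CPU backend's map_event hook
becomes a one-line wrapper over its own tables, as the perf_event_v6/v7/xscale
hunks below show. A hypothetical sketch, reusing the example_perf_cache_map
sketched earlier (the example_* names are invented; unlisted map entries must
really be HW_OP_UNSUPPORTED):

	static const unsigned example_perf_map[PERF_COUNT_HW_MAX] = {
		[PERF_COUNT_HW_CPU_CYCLES]	= 0xFF,	/* invented encoding */
		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,	/* invented encoding */
	};

	static int example_pmu_map_event(struct perf_event *event)
	{
		/* 0xFF is the raw event mask the existing backends pass. */
		return armpmu_map_event(event, &example_perf_map,
					&example_perf_cache_map, 0xFF);
	}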
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 7c29914bdcc6..9e3afd1994d9 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -29,26 +29,17 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-/*
- * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
- * another platform that supports more, we need to increase this to be the
- * largest of all platforms.
- *
- * ARMv7 supports up to 32 events:
- *  cycle counter CCNT + 31 events counters CNT0..30.
- *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
- */
-#define ARMPMU_MAX_HWEVENTS		32
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *cpu_pmu;
 
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
 const char *perf_pmu_name(void)
 {
 	if (!cpu_pmu)
@@ -69,13 +60,6 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-#define HW_OP_UNSUPPORTED		0xFFFF
-
-#define C(_x)				\
-	PERF_COUNT_HW_CACHE_##_x
-
-#define CACHE_OP_UNSUPPORTED		0xFFFF
-
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 		       [PERF_COUNT_HW_CACHE_MAX]
@@ -106,7 +90,7 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 }
 
 static int
-armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
 	int mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
@@ -118,19 +102,20 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 	return (int)(config & raw_event_mask);
 }
 
-static int map_cpu_event(struct perf_event *event,
-			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-			 const unsigned (*cache_map)
-					[PERF_COUNT_HW_CACHE_MAX]
-					[PERF_COUNT_HW_CACHE_OP_MAX]
-					[PERF_COUNT_HW_CACHE_RESULT_MAX],
-			 u32 raw_event_mask)
+int
+armpmu_map_event(struct perf_event *event,
+		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		 const unsigned (*cache_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		 u32 raw_event_mask)
 {
 	u64 config = event->attr.config;
 
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
-		return armpmu_map_event(event_map, config);
+		return armpmu_map_hw_event(event_map, config);
 	case PERF_TYPE_HW_CACHE:
 		return armpmu_map_cache_event(cache_map, config);
 	case PERF_TYPE_RAW:
@@ -594,6 +579,10 @@ static int armpmu_runtime_suspend(struct device *dev)
 }
 #endif
 
+const struct dev_pm_ops armpmu_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
+};
+
 static void __init armpmu_init(struct arm_pmu *armpmu)
 {
 	atomic_set(&armpmu->active_events, 0);
@@ -624,7 +613,7 @@ int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
 
-static struct pmu_hw_events *armpmu_get_cpu_events(void)
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
 	return &__get_cpu_var(cpu_hw_events);
 }
@@ -638,7 +627,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
-	cpu_pmu->get_hw_events = armpmu_get_cpu_events;
+	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
@@ -651,8 +640,8 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
+static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 		return NOTIFY_DONE;
@@ -663,12 +652,8 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
-	.notifier_call = pmu_cpu_notify,
-};
-
-static const struct dev_pm_ops armpmu_dev_pm_ops = {
-	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
+static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+	.notifier_call = cpu_pmu_notify,
 };
 
 /*
@@ -771,7 +756,7 @@ static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
 
 	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
-	register_cpu_notifier(&pmu_cpu_notifier);
+	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
 	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
 
 	return 0;
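
armpmu_dev_pm_ops is now defined globally (and declared in <asm/pmu.h>) so
that code outside this file can reference it, for instance a platform driver
for the CPU PMU. A hypothetical sketch of such a user (the driver name is
invented; cpu_pmu_device_probe is the probe routine in this file):

	#include <linux/platform_device.h>
	#include <asm/pmu.h>

	static struct platform_driver example_cpu_pmu_driver = {
		.driver	= {
			.name	= "example-arm-pmu",	/* invented name */
			.pm	= &armpmu_dev_pm_ops,	/* now visible via <asm/pmu.h> */
		},
		.probe	= cpu_pmu_device_probe,
	};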
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index a4328b12eed5..6ccc07971745 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -645,7 +645,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 
 static int armv6_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv6_perf_map,
+	return armpmu_map_event(event, &armv6_perf_map,
 				&armv6_perf_cache_map, 0xFF);
 }
 
@@ -679,7 +679,7 @@ static struct arm_pmu *__devinit armv6pmu_init(void)
 
 static int armv6mpcore_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv6mpcore_perf_map,
+	return armpmu_map_event(event, &armv6mpcore_perf_map,
 				&armv6mpcore_perf_cache_map, 0xFF);
 }
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d65a1b82e13f..bd4b090ebcfd 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1204,31 +1204,31 @@ static void armv7pmu_reset(void *info)
 
 static int armv7_a8_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv7_a8_perf_map,
+	return armpmu_map_event(event, &armv7_a8_perf_map,
 				&armv7_a8_perf_cache_map, 0xFF);
 }
 
 static int armv7_a9_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv7_a9_perf_map,
+	return armpmu_map_event(event, &armv7_a9_perf_map,
 				&armv7_a9_perf_cache_map, 0xFF);
 }
 
 static int armv7_a5_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv7_a5_perf_map,
+	return armpmu_map_event(event, &armv7_a5_perf_map,
 				&armv7_a5_perf_cache_map, 0xFF);
 }
 
 static int armv7_a15_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv7_a15_perf_map,
+	return armpmu_map_event(event, &armv7_a15_perf_map,
 				&armv7_a15_perf_cache_map, 0xFF);
 }
 
 static int armv7_a7_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &armv7_a7_perf_map,
+	return armpmu_map_event(event, &armv7_a7_perf_map,
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index dcc478c07456..426e19f380a2 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -430,7 +430,7 @@ xscale1pmu_write_counter(int counter, u32 val)
 
 static int xscale_map_event(struct perf_event *event)
 {
-	return map_cpu_event(event, &xscale_perf_map,
+	return armpmu_map_event(event, &xscale_perf_map,
 				&xscale_perf_cache_map, 0xFF);
 }
 
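
All of the renamed wrappers above funnel into the shared armpmu_map_event();
a PERF_TYPE_RAW request is simply masked, so with the 0xFF mask these backends
pass, raw event 0x44 maps to 0x44. A minimal userspace sketch requesting such
an event through the perf_event_open(2) syscall:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Open a raw hardware counter on the calling task, any CPU. */
	static int open_raw_counter(unsigned long long event)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_RAW;	/* kernel side: armpmu_map_raw_event() */
		attr.config = event;		/* e.g. 0x44, masked with raw_event_mask */

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}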