author		Will Deacon <will.deacon@arm.com>	2012-07-29 08:09:14 -0400
committer	Will Deacon <will.deacon@arm.com>	2012-08-23 06:35:52 -0400
commit		5505b206ca006d0506d1d3b3c494aa86234f66e2 (patch)
tree		c9fa078345110c7eeab9df768aba3120c98038da
parent		6dbc00297095122ea89e016ce6affad0b7c0ddac (diff)
ARM: perf: move CPU-specific PMU handling code into separate file
This patch moves the CPU-specific PMU handling code out of perf_event.c
and into perf_event_cpu.c.

Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	arch/arm/kernel/Makefile	2
-rw-r--r--	arch/arm/kernel/perf_event.c	211
-rw-r--r--	arch/arm/kernel/perf_event_cpu.c	231
3 files changed, 233 insertions(+), 211 deletions(-)
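
A usage note before the diff: the driver below binds in one of two ways — via a
devicetree node whose compatible string appears in cpu_pmu_of_device_ids, or via
a plain platform device named "arm-pmu" matched through cpu_pmu_plat_device_ids.
As a minimal sketch of the second path (not taken from this commit; the IRQ
number is hypothetical), a non-DT board file could register the PMU device like
so:

/*
 * Hypothetical board-file hookup for the "arm-pmu" platform driver
 * added below. The IRQ number (32) is made up for illustration; a real
 * board would use its SoC's PMU interrupt(s).
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource board_pmu_resource = {
	.start	= 32,			/* hypothetical PMU IRQ */
	.end	= 32,
	.flags	= IORESOURCE_IRQ,
};

static struct platform_device board_pmu_device = {
	.name		= "arm-pmu",	/* matches cpu_pmu_plat_device_ids */
	.id		= -1,
	.num_resources	= 1,
	.resource	= &board_pmu_resource,
};

static int __init board_pmu_register(void)
{
	return platform_device_register(&board_pmu_device);
}
arch_initcall(board_pmu_register);

With either binding, cpu_pmu_device_probe() selects the event-handling code via
of_match_node() or, failing that, by reading the CPU ID registers in
probe_current_pmu().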
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index cfbe3b4d1d5e..1c4321430737 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 9e3afd1994d9..86fd39937171 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,54 +12,15 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
-#include <linux/bitmap.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/perf_event.h>
 #include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
 
-#include <asm/cputype.h>
-#include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!cpu_pmu)
-		return NULL;
-
-	return cpu_pmu->pmu.name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (cpu_pmu != NULL)
-		max_events = cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
@@ -608,176 +569,6 @@ int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
 	return perf_pmu_register(&armpmu->pmu, name, type);
 }
 
-/* Include the PMU-specific implementations. */
-#include "perf_event_xscale.c"
-#include "perf_event_v6.c"
-#include "perf_event_v7.c"
-
-static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
-{
-	return &__get_cpu_var(cpu_hw_events);
-}
-
-static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	int cpu;
-	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
-		events->events = per_cpu(hw_events, cpu);
-		events->used_mask = per_cpu(used_mask, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
-	}
-	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
-
-	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu && cpu_pmu->reset)
-		on_each_cpu(cpu_pmu->reset, NULL, 1);
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
-{
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (cpu_pmu && cpu_pmu->reset)
-		cpu_pmu->reset(NULL);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
-	.notifier_call = cpu_pmu_notify,
-};
-
-/*
- * PMU platform driver and devicetree bindings.
- */
-static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = {
-	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
-	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
-	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
-	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
-	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
-	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
-	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init},
-	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init},
-	{},
-};
-
-static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
-	{.name = "arm-pmu"},
-	{},
-};
-
-/*
- * CPU PMU identification and probing.
- */
-static struct arm_pmu *__devinit probe_current_pmu(void)
-{
-	struct arm_pmu *pmu = NULL;
-	int cpu = get_cpu();
-	unsigned long cpuid = read_cpuid_id();
-	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
-	unsigned long part_number = (cpuid & 0xFFF0);
-
-	pr_info("probing PMU on CPU %d\n", cpu);
-
-	/* ARM Ltd CPUs. */
-	if (0x41 == implementor) {
-		switch (part_number) {
-		case 0xB360:	/* ARM1136 */
-		case 0xB560:	/* ARM1156 */
-		case 0xB760:	/* ARM1176 */
-			pmu = armv6pmu_init();
-			break;
-		case 0xB020:	/* ARM11mpcore */
-			pmu = armv6mpcore_pmu_init();
-			break;
-		case 0xC080:	/* Cortex-A8 */
-			pmu = armv7_a8_pmu_init();
-			break;
-		case 0xC090:	/* Cortex-A9 */
-			pmu = armv7_a9_pmu_init();
-			break;
-		case 0xC050:	/* Cortex-A5 */
-			pmu = armv7_a5_pmu_init();
-			break;
-		case 0xC0F0:	/* Cortex-A15 */
-			pmu = armv7_a15_pmu_init();
-			break;
-		case 0xC070:	/* Cortex-A7 */
-			pmu = armv7_a7_pmu_init();
-			break;
-		}
-	/* Intel CPUs [xscale]. */
-	} else if (0x69 == implementor) {
-		part_number = (cpuid >> 13) & 0x7;
-		switch (part_number) {
-		case 1:
-			pmu = xscale1pmu_init();
-			break;
-		case 2:
-			pmu = xscale2pmu_init();
-			break;
-		}
-	}
-
-	put_cpu();
-	return pmu;
-}
-
-static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
-{
-	const struct of_device_id *of_id;
-	struct arm_pmu *(*init_fn)(void);
-	struct device_node *node = pdev->dev.of_node;
-
-	if (cpu_pmu) {
-		pr_info("attempt to register multiple PMU devices!");
-		return -ENOSPC;
-	}
-
-	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-		cpu_pmu = init_fn();
-	} else {
-		cpu_pmu = probe_current_pmu();
-	}
-
-	if (!cpu_pmu)
-		return -ENODEV;
-
-	cpu_pmu->plat_device = pdev;
-	cpu_pmu_init(cpu_pmu);
-	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
-	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
-
-	return 0;
-}
-
-static struct platform_driver cpu_pmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.pm	= &armpmu_dev_pm_ops,
-		.of_match_table = cpu_pmu_of_device_ids,
-	},
-	.probe		= cpu_pmu_device_probe,
-	.id_table	= cpu_pmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&cpu_pmu_driver);
-}
-device_initcall(register_pmu_driver);
-
 /*
  * Callchain handling code.
  */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
new file mode 100644
index 000000000000..56ddc989c909
--- /dev/null
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -0,0 +1,231 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+#define pr_fmt(fmt) "CPU PMU: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *cpu_pmu;
+
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!cpu_pmu)
+		return NULL;
+
+	return cpu_pmu->pmu.name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+/* Include the PMU-specific implementations. */
+#include "perf_event_xscale.c"
+#include "perf_event_v6.c"
+#include "perf_event_v7.c"
+
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu && cpu_pmu->reset)
+		on_each_cpu(cpu_pmu->reset, NULL, 1);
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+	.notifier_call = cpu_pmu_notify,
+};
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
+	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
+	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
+	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
+	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
+	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init},
+	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init},
+	{},
+};
+
+static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+/*
+ * CPU PMU identification and probing.
+ */
+static struct arm_pmu *__devinit probe_current_pmu(void)
+{
+	struct arm_pmu *pmu = NULL;
+	int cpu = get_cpu();
+	unsigned long cpuid = read_cpuid_id();
+	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
+	unsigned long part_number = (cpuid & 0xFFF0);
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	/* ARM Ltd CPUs. */
+	if (0x41 == implementor) {
+		switch (part_number) {
+		case 0xB360:	/* ARM1136 */
+		case 0xB560:	/* ARM1156 */
+		case 0xB760:	/* ARM1176 */
+			pmu = armv6pmu_init();
+			break;
+		case 0xB020:	/* ARM11mpcore */
+			pmu = armv6mpcore_pmu_init();
+			break;
+		case 0xC080:	/* Cortex-A8 */
+			pmu = armv7_a8_pmu_init();
+			break;
+		case 0xC090:	/* Cortex-A9 */
+			pmu = armv7_a9_pmu_init();
+			break;
+		case 0xC050:	/* Cortex-A5 */
+			pmu = armv7_a5_pmu_init();
+			break;
+		case 0xC0F0:	/* Cortex-A15 */
+			pmu = armv7_a15_pmu_init();
+			break;
+		case 0xC070:	/* Cortex-A7 */
+			pmu = armv7_a7_pmu_init();
+			break;
+		}
+	/* Intel CPUs [xscale]. */
+	} else if (0x69 == implementor) {
+		part_number = (cpuid >> 13) & 0x7;
+		switch (part_number) {
+		case 1:
+			pmu = xscale1pmu_init();
+			break;
+		case 2:
+			pmu = xscale2pmu_init();
+			break;
+		}
+	}
+
+	put_cpu();
+	return pmu;
+}
+
+static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	struct arm_pmu *(*init_fn)(void);
+	struct device_node *node = pdev->dev.of_node;
+
+	if (cpu_pmu) {
+		pr_info("attempt to register multiple PMU devices!");
+		return -ENOSPC;
+	}
+
+	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+		cpu_pmu = init_fn();
+	} else {
+		cpu_pmu = probe_current_pmu();
+	}
+
+	if (!cpu_pmu)
+		return -ENODEV;
+
+	cpu_pmu->plat_device = pdev;
+	cpu_pmu_init(cpu_pmu);
+	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
+	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
+
+	return 0;
+}
+
+static struct platform_driver cpu_pmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.pm	= &armpmu_dev_pm_ops,
+		.of_match_table = cpu_pmu_of_device_ids,
+	},
+	.probe		= cpu_pmu_device_probe,
+	.id_table	= cpu_pmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&cpu_pmu_driver);
+}
+device_initcall(register_pmu_driver);
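
Once cpu_pmu_device_probe() has called armpmu_register(), the CPU PMU is
exposed through the generic perf core, so its counters are reachable from
userspace with the perf_event_open(2) syscall. A minimal sketch (not part of
the commit) that counts CPU cycles across a busy loop:

/*
 * Hypothetical userspace test, not part of the commit: count CPU cycles
 * on the PMU this driver registers. glibc provides no wrapper for
 * perf_event_open, so the raw syscall is used.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* Count cycles for the current task on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;			/* workload under measurement */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		return 1;
	}

	printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}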