path: root/arch/arm/kernel
author    Russell King <rmk+kernel@arm.linux.org.uk>    2014-03-07 09:42:35 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2014-03-07 09:42:35 -0500
commit    3ba4cea21901d90d703b52e4a806fbafa86037a6 (patch)
tree      b556d880492a783861a39c8197e35d56ac4c77f9 /arch/arm/kernel
parent    a2e6177c931793b4ffb30e722fce6fc7aaff9fa5 (diff)
parent    8e781f65423c2e8e65a56972ba996b6c01a5ef3e (diff)
Merge branch 'for-rmk/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
* Support for Qualcomm Krait processors (run perf on your phone!)
* Support for Cortex-A12 (run perf stat on your FPGA!)
* Support for perf_sample_event_took, allowing us to automatically decrease
  the sample rate if we can't handle the PMU interrupts quickly enough
  (run perf record on your FPGA!); see the sketch below.

As part of the Krait support, we also gain support for PPI generation by the PMU.
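For orientation, the sample-rate feedback mentioned above works by timing each
PMU interrupt dispatch with sched_clock() and reporting the duration to the
perf core, which throttles the maximum sample rate when handlers take too long.
A minimal, illustrative sketch of that pattern (the real change is the
armpmu_dispatch_irq() hunk in perf_event.c below; the function name here is
hypothetical):

    #include <linux/irqreturn.h>
    #include <linux/perf_event.h>   /* perf_sample_event_took() */
    #include <linux/sched.h>        /* sched_clock() */

    /* Time one PMU interrupt and feed the duration back to the perf core. */
    static irqreturn_t timed_pmu_dispatch(int irq, void *dev,
                                          irqreturn_t (*handle)(int, void *))
    {
            u64 start = sched_clock();
            irqreturn_t ret = handle(irq, dev);

            perf_sample_event_took(sched_clock() - start);
            return ret;
    }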
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/perf_event.c     |  27
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 113
-rw-r--r--  arch/arm/kernel/perf_event_v7.c  | 717
3 files changed, 813 insertions(+), 44 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 789d846a9184..a6bc431cde70 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
@@ -205,6 +207,8 @@ armpmu_del(struct perf_event *event, int flags)
 	armpmu_stop(event, PERF_EF_UPDATE);
 	hw_events->events[idx] = NULL;
 	clear_bit(idx, hw_events->used_mask);
+	if (armpmu->clear_event_idx)
+		armpmu->clear_event_idx(hw_events, event);
 
 	perf_event_update_userpage(event);
 }
@@ -295,14 +299,27 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
-	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
-	struct platform_device *plat_device = armpmu->plat_device;
-	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
+	struct arm_pmu *armpmu;
+	struct platform_device *plat_device;
+	struct arm_pmu_platdata *plat;
+	int ret;
+	u64 start_clock, finish_clock;
 
+	if (irq_is_percpu(irq))
+		dev = *(void **)dev;
+	armpmu = dev;
+	plat_device = armpmu->plat_device;
+	plat = dev_get_platdata(&plat_device->dev);
+
+	start_clock = sched_clock();
 	if (plat && plat->handle_irq)
-		return plat->handle_irq(irq, dev, armpmu->handle_irq);
+		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
 	else
-		return armpmu->handle_irq(irq, dev);
+		ret = armpmu->handle_irq(irq, dev);
+	finish_clock = sched_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
 }
 
 static void
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 20d553c9f5e2..51798d7854ac 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -25,6 +25,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
@@ -33,6 +35,7 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
+static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -71,6 +74,26 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return this_cpu_ptr(&cpu_hw_events);
 }
 
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+	disable_percpu_irq(irq);
+}
+
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 {
 	int i, irq, irqs;
@@ -78,12 +101,18 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		free_percpu_irq(irq, &percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, cpu_pmu);
+		}
 	}
 }
 
@@ -101,33 +130,44 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		return -ENODEV;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				   irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handler,
-				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-				  cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
 			return err;
 		}
-
-		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					   irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  cpu_pmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		}
 	}
 
 	return 0;
@@ -141,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
+		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
 
 	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
@@ -181,6 +222,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
  */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
@@ -188,6 +230,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
 	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init},
+	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
 	{},
 };
 
@@ -225,15 +268,6 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 		case ARM_CPU_PART_CORTEX_A9:
 			ret = armv7_a9_pmu_init(pmu);
 			break;
-		case ARM_CPU_PART_CORTEX_A5:
-			ret = armv7_a5_pmu_init(pmu);
-			break;
-		case ARM_CPU_PART_CORTEX_A15:
-			ret = armv7_a15_pmu_init(pmu);
-			break;
-		case ARM_CPU_PART_CORTEX_A7:
-			ret = armv7_a7_pmu_init(pmu);
-			break;
 		}
 	/* Intel CPUs [xscale]. */
 	} else if (implementor == ARM_CPU_IMP_INTEL) {
@@ -270,6 +304,9 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	cpu_pmu = pmu;
+	cpu_pmu->plat_device = pdev;
+
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 		ret = init_fn(pmu);
@@ -282,8 +319,6 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		goto out_free;
 	}
 
-	cpu_pmu = pmu;
-	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
 	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 039cffb053a7..f4ef3981ed02 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,6 +18,10 @@
 
 #ifdef CONFIG_CPU_V7
 
+#include <asm/cp15.h>
+#include <asm/vfp.h>
+#include "../vfp/vfpinstr.h"
+
 /*
  * Common ARMv7 event types
  *
@@ -109,6 +113,33 @@ enum armv7_a15_perf_types {
 	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 };
 
+/* ARMv7 Cortex-A12 specific event types */
+enum armv7_a12_perf_types {
+	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
+	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
+
+	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
+	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
+
+	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,
+
+	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
+};
+
+/* ARMv7 Krait specific event types */
+enum krait_perf_types {
+	KRAIT_PMRESR0_GROUP0				= 0xcc,
+	KRAIT_PMRESR1_GROUP0				= 0xd0,
+	KRAIT_PMRESR2_GROUP0				= 0xd4,
+	KRAIT_VPMRESR0_GROUP0				= 0xd8,
+
+	KRAIT_PERFCTR_L1_ICACHE_ACCESS			= 0x10011,
+	KRAIT_PERFCTR_L1_ICACHE_MISS			= 0x10010,
+
+	KRAIT_PERFCTR_L1_ITLB_ACCESS			= 0x12222,
+	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -732,6 +763,262 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
766 * Cortex-A12 HW events mapping
767 */
768static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
769 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
770 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
771 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
772 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
773 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
774 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
775 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
776 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
777 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
778};
779
780static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
781 [PERF_COUNT_HW_CACHE_OP_MAX]
782 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
783 [C(L1D)] = {
784 [C(OP_READ)] = {
785 [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
786 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
787 },
788 [C(OP_WRITE)] = {
789 [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
790 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
791 },
792 [C(OP_PREFETCH)] = {
793 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
794 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
795 },
796 },
797 [C(L1I)] = {
798 /*
799 * Not all performance counters differentiate between read
800 * and write accesses/misses so we're not always strictly
801 * correct, but it's the best we can do. Writes and reads get
802 * combined in these cases.
803 */
804 [C(OP_READ)] = {
805 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
806 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
807 },
808 [C(OP_WRITE)] = {
809 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
810 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
811 },
812 [C(OP_PREFETCH)] = {
813 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
814 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
815 },
816 },
817 [C(LL)] = {
818 [C(OP_READ)] = {
819 [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
820 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
821 },
822 [C(OP_WRITE)] = {
823 [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
824 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
825 },
826 [C(OP_PREFETCH)] = {
827 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
828 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
829 },
830 },
831 [C(DTLB)] = {
832 [C(OP_READ)] = {
833 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
834 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
835 },
836 [C(OP_WRITE)] = {
837 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
838 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
839 },
840 [C(OP_PREFETCH)] = {
841 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
842 [C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
843 },
844 },
845 [C(ITLB)] = {
846 [C(OP_READ)] = {
847 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
848 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
849 },
850 [C(OP_WRITE)] = {
851 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
852 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
853 },
854 [C(OP_PREFETCH)] = {
855 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
856 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
857 },
858 },
859 [C(BPU)] = {
860 [C(OP_READ)] = {
861 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
862 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
863 },
864 [C(OP_WRITE)] = {
865 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
866 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
867 },
868 [C(OP_PREFETCH)] = {
869 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
870 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
871 },
872 },
873 [C(NODE)] = {
874 [C(OP_READ)] = {
875 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
876 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
877 },
878 [C(OP_WRITE)] = {
879 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
880 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
881 },
882 [C(OP_PREFETCH)] = {
883 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
884 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
885 },
886 },
887};
888
889/*
890 * Krait HW events mapping
891 */
892static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
893 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
894 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
895 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
896 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
897 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
898 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
899 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
900};
901
902static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
903 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
904 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
905 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
906 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
907 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
908 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
909 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
910};
911
912static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
913 [PERF_COUNT_HW_CACHE_OP_MAX]
914 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
915 [C(L1D)] = {
916 /*
917 * The performance counters don't differentiate between read
918 * and write accesses/misses so this isn't strictly correct,
919 * but it's the best we can do. Writes and reads get
920 * combined.
921 */
922 [C(OP_READ)] = {
923 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
924 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
925 },
926 [C(OP_WRITE)] = {
927 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
928 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
929 },
930 [C(OP_PREFETCH)] = {
931 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
932 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
933 },
934 },
935 [C(L1I)] = {
936 [C(OP_READ)] = {
937 [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
938 [C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
939 },
940 [C(OP_WRITE)] = {
941 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
942 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
943 },
944 [C(OP_PREFETCH)] = {
945 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
946 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
947 },
948 },
949 [C(LL)] = {
950 [C(OP_READ)] = {
951 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
952 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
953 },
954 [C(OP_WRITE)] = {
955 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
956 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
957 },
958 [C(OP_PREFETCH)] = {
959 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
960 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
961 },
962 },
963 [C(DTLB)] = {
964 [C(OP_READ)] = {
965 [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
966 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
967 },
968 [C(OP_WRITE)] = {
969 [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
970 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
971 },
972 [C(OP_PREFETCH)] = {
973 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
974 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
975 },
976 },
977 [C(ITLB)] = {
978 [C(OP_READ)] = {
979 [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
980 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
981 },
982 [C(OP_WRITE)] = {
983 [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
984 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
985 },
986 [C(OP_PREFETCH)] = {
987 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
988 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
989 },
990 },
991 [C(BPU)] = {
992 [C(OP_READ)] = {
993 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
994 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
995 },
996 [C(OP_WRITE)] = {
997 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
998 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
999 },
1000 [C(OP_PREFETCH)] = {
1001 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1002 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1003 },
1004 },
1005 [C(NODE)] = {
1006 [C(OP_READ)] = {
1007 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1008 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1009 },
1010 [C(OP_WRITE)] = {
1011 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1012 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1013 },
1014 [C(OP_PREFETCH)] = {
1015 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1016 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1017 },
1018 },
1019};
1020
1021/*
  * Perf Events' indices
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
@@ -1212,6 +1499,24 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
+static int armv7_a12_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv7_a12_perf_map,
+				&armv7_a12_perf_cache_map, 0xFF);
+}
+
+static int krait_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &krait_perf_map,
+				&krait_perf_cache_map, 0xFFFFF);
+}
+
+static int krait_map_event_no_branch(struct perf_event *event)
+{
+	return armpmu_map_event(event, &krait_perf_map_no_branch,
+				&krait_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1283,6 +1588,408 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
 	return 0;
 }
1591
1592static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1593{
1594 armv7pmu_init(cpu_pmu);
1595 cpu_pmu->name = "ARMv7 Cortex-A12";
1596 cpu_pmu->map_event = armv7_a12_map_event;
1597 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1598 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1599 return 0;
1600}
1601
1602/*
1603 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1604 *
1605 * 31 30 24 16 8 0
1606 * +--------------------------------+
1607 * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0
1608 * +--------------------------------+
1609 * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1
1610 * +--------------------------------+
1611 * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2
1612 * +--------------------------------+
1613 * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ?
1614 * +--------------------------------+
1615 * EN | G=3 | G=2 | G=1 | G=0
1616 *
1617 * Event Encoding:
1618 *
1619 * hwc->config_base = 0xNRCCG
1620 *
1621 * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1622 * R = region register
1623 * CC = class of events the group G is choosing from
1624 * G = group or particular event
1625 *
1626 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1627 *
1628 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
1629 * unit, etc.) while the event code (CC) corresponds to a particular class of
1630 * events (interrupts for example). An event code is broken down into
1631 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1632 * example).
1633 */
1634
1635#define KRAIT_EVENT (1 << 16)
1636#define VENUM_EVENT (2 << 16)
1637#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
1638#define PMRESRn_EN BIT(31)
1639
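/*
 * [Editor's illustrative sketch - not part of this commit] Decoding a
 * hwc->config_base value laid out as 0xNRCCG, as documented above. The
 * field positions mirror the shifts used by krait_evt_setup() and
 * krait_pmu_get_event_idx() further down; the struct and function names
 * here are hypothetical.
 */
struct krait_event_fields {
	bool venum;		/* N == 2: Venum VFP event (VPMRESR0) */
	unsigned int region;	/* R: which PMRESRn register */
	unsigned int code;	/* CC: class of events the group selects from */
	unsigned int group;	/* G: group (byte lane) within PMRESRn */
};

static struct krait_event_fields krait_decode_event(u32 config_base)
{
	struct krait_event_fields f = {
		.venum	= !!(config_base & VENUM_EVENT),
		.region	= (config_base >> 12) & 0xf,
		.code	= (config_base >> 4) & 0xff,
		.group	= config_base & 0xf,
	};
	return f;
}

/* Example: 0x12021 decodes to region 2, code 0x02, group 1 (a Krait CPU event). */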
1640static u32 krait_read_pmresrn(int n)
1641{
1642 u32 val;
1643
1644 switch (n) {
1645 case 0:
1646 asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1647 break;
1648 case 1:
1649 asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1650 break;
1651 case 2:
1652 asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1653 break;
1654 default:
1655 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1656 }
1657
1658 return val;
1659}
1660
1661static void krait_write_pmresrn(int n, u32 val)
1662{
1663 switch (n) {
1664 case 0:
1665 asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1666 break;
1667 case 1:
1668 asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1669 break;
1670 case 2:
1671 asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1672 break;
1673 default:
1674 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1675 }
1676}
1677
1678static u32 krait_read_vpmresr0(void)
1679{
1680 u32 val;
1681 asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1682 return val;
1683}
1684
1685static void krait_write_vpmresr0(u32 val)
1686{
1687 asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1688}
1689
1690static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
1691{
1692 u32 venum_new_val;
1693 u32 fp_new_val;
1694
1695 BUG_ON(preemptible());
1696 /* CPACR Enable CP10 and CP11 access */
1697 *venum_orig_val = get_copro_access();
1698 venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1699 set_copro_access(venum_new_val);
1700
1701 /* Enable FPEXC */
1702 *fp_orig_val = fmrx(FPEXC);
1703 fp_new_val = *fp_orig_val | FPEXC_EN;
1704 fmxr(FPEXC, fp_new_val);
1705}
1706
1707static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
1708{
1709 BUG_ON(preemptible());
1710 /* Restore FPEXC */
1711 fmxr(FPEXC, fp_orig_val);
1712 isb();
1713 /* Restore CPACR */
1714 set_copro_access(venum_orig_val);
1715}
1716
1717static u32 krait_get_pmresrn_event(unsigned int region)
1718{
1719 static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1720 KRAIT_PMRESR1_GROUP0,
1721 KRAIT_PMRESR2_GROUP0 };
1722 return pmresrn_table[region];
1723}
1724
1725static void krait_evt_setup(int idx, u32 config_base)
1726{
1727 u32 val;
1728 u32 mask;
1729 u32 vval, fval;
1730 unsigned int region;
1731 unsigned int group;
1732 unsigned int code;
1733 unsigned int group_shift;
1734 bool venum_event;
1735
1736 venum_event = !!(config_base & VENUM_EVENT);
1737 region = (config_base >> 12) & 0xf;
1738 code = (config_base >> 4) & 0xff;
1739 group = (config_base >> 0) & 0xf;
1740
1741 group_shift = group * 8;
1742 mask = 0xff << group_shift;
1743
1744 /* Configure evtsel for the region and group */
1745 if (venum_event)
1746 val = KRAIT_VPMRESR0_GROUP0;
1747 else
1748 val = krait_get_pmresrn_event(region);
1749 val += group;
1750 /* Mix in mode-exclusion bits */
1751 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1752 armv7_pmnc_write_evtsel(idx, val);
1753
1754 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1755
1756 if (venum_event) {
1757 krait_pre_vpmresr0(&vval, &fval);
1758 val = krait_read_vpmresr0();
1759 val &= ~mask;
1760 val |= code << group_shift;
1761 val |= PMRESRn_EN;
1762 krait_write_vpmresr0(val);
1763 krait_post_vpmresr0(vval, fval);
1764 } else {
1765 val = krait_read_pmresrn(region);
1766 val &= ~mask;
1767 val |= code << group_shift;
1768 val |= PMRESRn_EN;
1769 krait_write_pmresrn(region, val);
1770 }
1771}
1772
1773static u32 krait_clear_pmresrn_group(u32 val, int group)
1774{
1775 u32 mask;
1776 int group_shift;
1777
1778 group_shift = group * 8;
1779 mask = 0xff << group_shift;
1780 val &= ~mask;
1781
1782 /* Don't clear enable bit if entire region isn't disabled */
1783 if (val & ~PMRESRn_EN)
1784 return val |= PMRESRn_EN;
1785
1786 return 0;
1787}
1788
1789static void krait_clearpmu(u32 config_base)
1790{
1791 u32 val;
1792 u32 vval, fval;
1793 unsigned int region;
1794 unsigned int group;
1795 bool venum_event;
1796
1797 venum_event = !!(config_base & VENUM_EVENT);
1798 region = (config_base >> 12) & 0xf;
1799 group = (config_base >> 0) & 0xf;
1800
1801 if (venum_event) {
1802 krait_pre_vpmresr0(&vval, &fval);
1803 val = krait_read_vpmresr0();
1804 val = krait_clear_pmresrn_group(val, group);
1805 krait_write_vpmresr0(val);
1806 krait_post_vpmresr0(vval, fval);
1807 } else {
1808 val = krait_read_pmresrn(region);
1809 val = krait_clear_pmresrn_group(val, group);
1810 krait_write_pmresrn(region, val);
1811 }
1812}
1813
1814static void krait_pmu_disable_event(struct perf_event *event)
1815{
1816 unsigned long flags;
1817 struct hw_perf_event *hwc = &event->hw;
1818 int idx = hwc->idx;
1819 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1820
1821 /* Disable counter and interrupt */
1822 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1823
1824 /* Disable counter */
1825 armv7_pmnc_disable_counter(idx);
1826
1827 /*
1828 * Clear pmresr code (if destined for PMNx counters)
1829 */
1830 if (hwc->config_base & KRAIT_EVENT_MASK)
1831 krait_clearpmu(hwc->config_base);
1832
1833 /* Disable interrupt for this counter */
1834 armv7_pmnc_disable_intens(idx);
1835
1836 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1837}
1838
1839static void krait_pmu_enable_event(struct perf_event *event)
1840{
1841 unsigned long flags;
1842 struct hw_perf_event *hwc = &event->hw;
1843 int idx = hwc->idx;
1844 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1845
1846 /*
1847 * Enable counter and interrupt, and set the counter to count
1848 * the event that we're interested in.
1849 */
1850 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1851
1852 /* Disable counter */
1853 armv7_pmnc_disable_counter(idx);
1854
1855 /*
1856 * Set event (if destined for PMNx counters)
1857 * We set the event for the cycle counter because we
1858 * have the ability to perform event filtering.
1859 */
1860 if (hwc->config_base & KRAIT_EVENT_MASK)
1861 krait_evt_setup(idx, hwc->config_base);
1862 else
1863 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1864
1865 /* Enable interrupt for this counter */
1866 armv7_pmnc_enable_intens(idx);
1867
1868 /* Enable counter */
1869 armv7_pmnc_enable_counter(idx);
1870
1871 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1872}
1873
1874static void krait_pmu_reset(void *info)
1875{
1876 u32 vval, fval;
1877
1878 armv7pmu_reset(info);
1879
1880 /* Clear all pmresrs */
1881 krait_write_pmresrn(0, 0);
1882 krait_write_pmresrn(1, 0);
1883 krait_write_pmresrn(2, 0);
1884
1885 krait_pre_vpmresr0(&vval, &fval);
1886 krait_write_vpmresr0(0);
1887 krait_post_vpmresr0(vval, fval);
1888}
1889
1890static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1891 unsigned int group)
1892{
1893 int bit;
1894 struct hw_perf_event *hwc = &event->hw;
1895 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1896
1897 if (hwc->config_base & VENUM_EVENT)
1898 bit = KRAIT_VPMRESR0_GROUP0;
1899 else
1900 bit = krait_get_pmresrn_event(region);
1901 bit -= krait_get_pmresrn_event(0);
1902 bit += group;
1903 /*
1904 * Lower bits are reserved for use by the counters (see
1905 * armv7pmu_get_event_idx() for more info)
1906 */
1907 bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1908
1909 return bit;
1910}
1911
1912/*
1913 * We check for column exclusion constraints here.
1914 * Two events cant use the same group within a pmresr register.
1915 */
1916static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1917 struct perf_event *event)
1918{
1919 int idx;
1920 int bit;
1921 unsigned int prefix;
1922 unsigned int region;
1923 unsigned int code;
1924 unsigned int group;
1925 bool krait_event;
1926 struct hw_perf_event *hwc = &event->hw;
1927
1928 region = (hwc->config_base >> 12) & 0xf;
1929 code = (hwc->config_base >> 4) & 0xff;
1930 group = (hwc->config_base >> 0) & 0xf;
1931 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1932
1933 if (krait_event) {
1934 /* Ignore invalid events */
1935 if (group > 3 || region > 2)
1936 return -EINVAL;
1937 prefix = hwc->config_base & KRAIT_EVENT_MASK;
1938 if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1939 return -EINVAL;
1940 if (prefix == VENUM_EVENT && (code & 0xe0))
1941 return -EINVAL;
1942
1943 bit = krait_event_to_bit(event, region, group);
1944 if (test_and_set_bit(bit, cpuc->used_mask))
1945 return -EAGAIN;
1946 }
1947
1948 idx = armv7pmu_get_event_idx(cpuc, event);
1949 if (idx < 0 && krait_event)
1950 clear_bit(bit, cpuc->used_mask);
1951
1952 return idx;
1953}
1954
1955static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1956 struct perf_event *event)
1957{
1958 int bit;
1959 struct hw_perf_event *hwc = &event->hw;
1960 unsigned int region;
1961 unsigned int group;
1962 bool krait_event;
1963
1964 region = (hwc->config_base >> 12) & 0xf;
1965 group = (hwc->config_base >> 0) & 0xf;
1966 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1967
1968 if (krait_event) {
1969 bit = krait_event_to_bit(event, region, group);
1970 clear_bit(bit, cpuc->used_mask);
1971 }
1972}
1973
1974static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1975{
1976 armv7pmu_init(cpu_pmu);
1977 cpu_pmu->name = "ARMv7 Krait";
1978 /* Some early versions of Krait don't support PC write events */
1979 if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1980 "qcom,no-pc-write"))
1981 cpu_pmu->map_event = krait_map_event_no_branch;
1982 else
1983 cpu_pmu->map_event = krait_map_event;
1984 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1985 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1986 cpu_pmu->reset = krait_pmu_reset;
1987 cpu_pmu->enable = krait_pmu_enable_event;
1988 cpu_pmu->disable = krait_pmu_disable_event;
1989 cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
1990 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1991 return 0;
1992}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1308,4 +2015,14 @@ static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
+
+static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_CPU_V7 */