author		Will Deacon <will.deacon@arm.com>	2010-04-30 06:37:51 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-05-17 06:53:58 -0400
commit		fe166148f699cc7865ca07b6754872cfb5ebc312 (patch)
tree		421392e3fb9866c40cc8cfd952be44fd63336221 /arch/arm
parent		8c1fc96f6fd1f361428ba805103af0d0eee65179 (diff)
ARM: 6073/1: oprofile: remove old files and update KConfig
Enable hardware perf-events if CPU_HAS_PMU and select HAVE_OPROFILE if HAVE_PERF_EVENTS. If no hardware support is present, OProfile will fall back to timer mode. This patch also removes the old OProfile drivers in favour of the code implemented by perf.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
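Editor's note: the timer-mode fallback mentioned above lives in the generic OProfile core (drivers/oprofile), not in arch/arm. When the architecture reports no usable PMU, the core switches to timer-based sampling. A simplified sketch of that init-time logic, paraphrased from the oprofile core of this era; treat the exact signatures as illustrative, since they drifted across releases:

	/* Sketch of the drivers/oprofile init-time fallback (simplified). */
	static int __init oprofile_init(void)
	{
		int err;

		/* Ask the architecture to wire up hardware counters first. */
		err = oprofile_arch_init(&oprofile_ops);
		if (err < 0 || timer) {
			/* No PMU support (or timer mode was forced on the
			 * command line): fall back to timer sampling. */
			printk(KERN_INFO "oprofile: using timer interrupt.\n");
			oprofile_timer_init(&oprofile_ops);
		}

		return oprofilefs_register();
	}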
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                          26
-rw-r--r--  arch/arm/oprofile/Makefile                 7
-rw-r--r--  arch/arm/oprofile/backtrace.c             83
-rw-r--r--  arch/arm/oprofile/op_arm_model.h          35
-rw-r--r--  arch/arm/oprofile/op_counter.h            27
-rw-r--r--  arch/arm/oprofile/op_model_arm11_core.c  162
-rw-r--r--  arch/arm/oprofile/op_model_arm11_core.h   45
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c      306
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.h       61
-rw-r--r--  arch/arm/oprofile/op_model_v6.c           78
-rw-r--r--  arch/arm/oprofile/op_model_v7.c          415
-rw-r--r--  arch/arm/oprofile/op_model_v7.h          103
-rw-r--r--  arch/arm/oprofile/op_model_xscale.c      444
13 files changed, 3 insertions, 1789 deletions
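Editor's note: the drivers deleted below all program the CP15/CP14 performance-counter registers by hand. Their replacement (arch/arm/oprofile/common.c, reworked by the parent commit to sit on top of perf) lets the kernel perf_events layer own the PMU instead. A hedged, illustrative sketch of that approach, counter creation only: the helper name here is hypothetical, and perf_event_create_kernel_counter()'s signature has changed across kernel versions, so read this as the idea rather than the actual common.c code.

	#include <linux/perf_event.h>

	/* Illustrative only: ask perf for one kernel-owned cycle counter
	 * on a given CPU, sampling every 100000 cycles, instead of
	 * issuing mcr/mrc on the PMU registers directly. */
	static struct perf_event *op_start_cycle_counter(int cpu)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 100000,
		};

		/* The overflow handler (which would feed
		 * oprofile_add_sample()) is omitted for brevity. */
		return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL);
	}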
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index daaa4b2b46e1..f7c2a882b54d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -13,7 +13,7 @@ config ARM
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
 	select GENERIC_ATOMIC64 if (!CPU_32v6K)
-	select HAVE_OPROFILE
+	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
@@ -181,28 +181,6 @@ config ARM_L1_CACHE_SHIFT_6
 	help
 	  Setting ARM L1 cache line size to 64 Bytes.
 
-if OPROFILE
-
-config OPROFILE_ARMV6
-	def_bool y
-	depends on CPU_V6 && !SMP
-	select OPROFILE_ARM11_CORE
-
-config OPROFILE_MPCORE
-	def_bool y
-	depends on CPU_V6 && SMP
-	select OPROFILE_ARM11_CORE
-
-config OPROFILE_ARM11_CORE
-	bool
-
-config OPROFILE_ARMV7
-	def_bool y
-	depends on CPU_V7 && !SMP
-	bool
-
-endif
-
 config VECTORS_BASE
 	hex
 	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -1334,7 +1312,7 @@ config HIGHPTE
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && CPU_HAS_PMU && (CPU_V6 || CPU_V7)
+	depends on PERF_EVENTS && CPU_HAS_PMU
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index 88e31f549f50..e666eafed152 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,9 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y				:= $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_CPU_XSCALE)		+= op_model_xscale.o
-oprofile-$(CONFIG_OPROFILE_ARM11_CORE)	+= op_model_arm11_core.o
-oprofile-$(CONFIG_OPROFILE_ARMV6)	+= op_model_v6.o
-oprofile-$(CONFIG_OPROFILE_MPCORE)	+= op_model_mpcore.o
-oprofile-$(CONFIG_OPROFILE_ARMV7)	+= op_model_v7.o
+oprofile-y				:= $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
deleted file mode 100644
index d805a52b5032..000000000000
--- a/arch/arm/oprofile/backtrace.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Arm specific backtracing code for oprofile
- *
- * Copyright 2005 Openedhand Ltd.
- *
- * Author: Richard Purdie <rpurdie@openedhand.com>
- *
- * Based on i386 oprofile backtrace code by John Levon, David Smith
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-
-static int report_trace(struct stackframe *frame, void *d)
-{
-	unsigned int *depth = d;
-
-	if (*depth) {
-		oprofile_add_trace(frame->pc);
-		(*depth)--;
-	}
-
-	return *depth == 0;
-}
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- */
-struct frame_tail {
-	struct frame_tail *fp;
-	unsigned long sp;
-	unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail* user_backtrace(struct frame_tail *tail)
-{
-	struct frame_tail buftail[2];
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
-		return NULL;
-
-	oprofile_add_trace(buftail[0].lr);
-
-	/* frame pointers should strictly progress back up the stack
-	 * (towards higher addresses) */
-	if (tail >= buftail[0].fp)
-		return NULL;
-
-	return buftail[0].fp-1;
-}
-
-void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
-	struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-
-	if (!user_mode(regs)) {
-		struct stackframe frame;
-		frame.fp = regs->ARM_fp;
-		frame.sp = regs->ARM_sp;
-		frame.lr = regs->ARM_lr;
-		frame.pc = regs->ARM_pc;
-		walk_stackframe(&frame, report_trace, &depth);
-		return;
-	}
-
-	while (depth-- && tail && !((unsigned long) tail & 3))
-		tail = user_backtrace(tail);
-}
diff --git a/arch/arm/oprofile/op_arm_model.h b/arch/arm/oprofile/op_arm_model.h
deleted file mode 100644
index 8c4e4f6a1de3..000000000000
--- a/arch/arm/oprofile/op_arm_model.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * @file op_arm_model.h
- * interface to ARM machine specific operations
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_ARM_MODEL_H
-#define OP_ARM_MODEL_H
-
-struct op_arm_model_spec {
-	int (*init)(void);
-	unsigned int num_counters;
-	int (*setup_ctrs)(void);
-	int (*start)(void);
-	void (*stop)(void);
-	char *name;
-};
-
-#ifdef CONFIG_CPU_XSCALE
-extern struct op_arm_model_spec op_xscale_spec;
-#endif
-
-extern struct op_arm_model_spec op_armv6_spec;
-extern struct op_arm_model_spec op_mpcore_spec;
-extern struct op_arm_model_spec op_armv7_spec;
-
-extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
-extern void op_arm_exit(void);
-#endif /* OP_ARM_MODEL_H */
diff --git a/arch/arm/oprofile/op_counter.h b/arch/arm/oprofile/op_counter.h
deleted file mode 100644
index ca942a63b52f..000000000000
--- a/arch/arm/oprofile/op_counter.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * @file op_counter.h
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-/* Per performance monitor configuration as set via
- * oprofilefs.
- */
-struct op_counter_config {
-	unsigned long count;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long unit_mask;
-	unsigned long kernel;
-	unsigned long user;
-};
-
-extern struct op_counter_config *counter_config;
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c
deleted file mode 100644
index ef3e2653b90c..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * @file op_model_arm11_core.c
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-/*
- * ARM11 PMU support
- */
-static inline void arm11_write_pmnc(u32 val)
-{
-	/* upper 4bits and 7, 11 are write-as-0 */
-	val &= 0x0ffff77f;
-	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
-}
-
-static inline u32 arm11_read_pmnc(void)
-{
-	u32 val;
-	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
-	return val;
-}
-
-static void arm11_reset_counter(unsigned int cnt)
-{
-	u32 val = -(u32)counter_config[CPU_COUNTER(smp_processor_id(), cnt)].count;
-	switch (cnt) {
-	case CCNT:
-		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
-		break;
-
-	case PMN0:
-		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
-		break;
-
-	case PMN1:
-		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
-		break;
-	}
-}
-
-int arm11_setup_pmu(void)
-{
-	unsigned int cnt;
-	u32 pmnc;
-
-	if (arm11_read_pmnc() & PMCR_E) {
-		printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", smp_processor_id());
-		return -EBUSY;
-	}
-
-	/* initialize PMNC, reset overflow, D bit, C bit and P bit. */
-	arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
-			 PMCR_C | PMCR_P);
-
-	for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
-		unsigned long event;
-
-		if (!counter_config[CPU_COUNTER(smp_processor_id(), cnt)].enabled)
-			continue;
-
-		event = counter_config[CPU_COUNTER(smp_processor_id(), cnt)].event & 255;
-
-		/*
-		 * Set event (if destined for PMNx counters)
-		 */
-		if (cnt == PMN0) {
-			pmnc |= event << 20;
-		} else if (cnt == PMN1) {
-			pmnc |= event << 12;
-		}
-
-		/*
-		 * We don't need to set the event if it's a cycle count
-		 * Enable interrupt for this counter
-		 */
-		pmnc |= PMCR_IEN_PMN0 << cnt;
-		arm11_reset_counter(cnt);
-	}
-	arm11_write_pmnc(pmnc);
-
-	return 0;
-}
-
-int arm11_start_pmu(void)
-{
-	arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
-	return 0;
-}
-
-int arm11_stop_pmu(void)
-{
-	unsigned int cnt;
-
-	arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
-
-	for (cnt = PMN0; cnt <= CCNT; cnt++)
-		arm11_reset_counter(cnt);
-
-	return 0;
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
-{
-	struct pt_regs *regs = get_irq_regs();
-	unsigned int cnt;
-	u32 pmnc;
-
-	pmnc = arm11_read_pmnc();
-
-	for (cnt = PMN0; cnt <= CCNT; cnt++) {
-		if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
-			arm11_reset_counter(cnt);
-			oprofile_add_sample(regs, CPU_COUNTER(smp_processor_id(), cnt));
-		}
-	}
-	/* Clear counter flag(s) */
-	arm11_write_pmnc(pmnc);
-	return IRQ_HANDLED;
-}
-
-int arm11_request_interrupts(const int *irqs, int nr)
-{
-	unsigned int i;
-	int ret = 0;
-
-	for(i = 0; i < nr; i++) {
-		ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
-		if (ret != 0) {
-			printk(KERN_ERR "oprofile: unable to request IRQ%u for MPCORE-EM\n",
-			       irqs[i]);
-			break;
-		}
-	}
-
-	if (i != nr)
-		while (i-- != 0)
-			free_irq(irqs[i], NULL);
-
-	return ret;
-}
-
-void arm11_release_interrupts(const int *irqs, int nr)
-{
-	unsigned int i;
-
-	for (i = 0; i < nr; i++)
-		free_irq(irqs[i], NULL);
-}
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h
deleted file mode 100644
index 1902b99d9dfd..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * @file op_model_arm11_core.h
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_ARM11_CORE_H
-#define OP_MODEL_ARM11_CORE_H
-
-/*
- * Per-CPU PMCR
- */
-#define PMCR_E		(1 << 0)	/* Enable */
-#define PMCR_P		(1 << 1)	/* Count reset */
-#define PMCR_C		(1 << 2)	/* Cycle counter reset */
-#define PMCR_D		(1 << 3)	/* Cycle counter counts every 64th cpu cycle */
-#define PMCR_IEN_PMN0	(1 << 4)	/* Interrupt enable count reg 0 */
-#define PMCR_IEN_PMN1	(1 << 5)	/* Interrupt enable count reg 1 */
-#define PMCR_IEN_CCNT	(1 << 6)	/* Interrupt enable cycle counter */
-#define PMCR_OFL_PMN0	(1 << 8)	/* Count reg 0 overflow */
-#define PMCR_OFL_PMN1	(1 << 9)	/* Count reg 1 overflow */
-#define PMCR_OFL_CCNT	(1 << 10)	/* Cycle counter overflow */
-
-#define PMN0 0
-#define PMN1 1
-#define CCNT 2
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
-
-int arm11_setup_pmu(void);
-int arm11_start_pmu(void);
-int arm11_stop_pmu(void);
-int arm11_request_interrupts(const int *, int);
-void arm11_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
deleted file mode 100644
index f73ce875a395..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- *
- * Counters:
- *	0: PMN0 on CPU0, per-cpu configurable event counter
- *	1: PMN1 on CPU0, per-cpu configurable event counter
- *	2: CCNT on CPU0
- *	3: PMN0 on CPU1
- *	4: PMN1 on CPU1
- *	5: CCNT on CPU1
- *	6: PMN0 on CPU2
- *	7: PMN1 on CPU2
- *	8: CCNT on CPU2
- *	9: PMN0 on CPU3
- *	10: PMN1 on CPU3
- *	11: CCNT on CPU3
- *	12-19: configurable SCU event counters
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/board-eb.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-#include "op_model_mpcore.h"
-
-/*
- * MPCore SCU event monitor support
- */
-#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
-
-/*
- * Bitmask of used SCU counters
- */
-static unsigned int scu_em_used;
-static const struct pmu_irqs *pmu_irqs;
-
-/*
- * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
- */
-static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
-{
-	writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
-}
-
-static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
-{
-	event &= 0xff;
-	writeb(event, &emc->MCEB[n]);
-}
-
-/*
- * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
- */
-static irqreturn_t scu_em_interrupt(int irq, void *arg)
-{
-	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
-	unsigned int cnt;
-
-	cnt = irq - IRQ_EB11MP_PMU_SCU0;
-	oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
-	scu_reset_counter(emc, cnt);
-
-	/* Clear overflow flag for this counter */
-	writel(1 << (cnt + 16), &emc->PMCR);
-
-	return IRQ_HANDLED;
-}
-
-/* Configure just the SCU counters that the user has requested */
-static void scu_setup(void)
-{
-	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
-	unsigned int i;
-
-	scu_em_used = 0;
-
-	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
-		if (counter_config[SCU_COUNTER(i)].enabled &&
-		    counter_config[SCU_COUNTER(i)].event) {
-			scu_set_event(emc, i, 0); /* disable counter for now */
-			scu_em_used |= 1 << i;
-		}
-	}
-}
-
-static int scu_start(void)
-{
-	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
-	unsigned int temp, i;
-	unsigned long event;
-	int ret = 0;
-
-	/*
-	 * request the SCU counter interrupts that we need
-	 */
-	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
-		if (scu_em_used & (1 << i)) {
-			ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
-			if (ret) {
-				printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
-				       IRQ_EB11MP_PMU_SCU0 + i);
-				goto err_free_scu;
-			}
-		}
-	}
-
-	/*
-	 * clear overflow and enable interrupt for all used counters
-	 */
-	temp = readl(&emc->PMCR);
-	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
-		if (scu_em_used & (1 << i)) {
-			scu_reset_counter(emc, i);
-			event = counter_config[SCU_COUNTER(i)].event;
-			scu_set_event(emc, i, event);
-
-			/* clear overflow/interrupt */
-			temp |= 1 << (i + 16);
-			/* enable interrupt*/
-			temp |= 1 << (i + 8);
-		}
-	}
-
-	/* Enable all 8 counters */
-	temp |= PMCR_E;
-	writel(temp, &emc->PMCR);
-
-	return 0;
-
- err_free_scu:
-	while (i--)
-		free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
-	return ret;
-}
-
-static void scu_stop(void)
-{
-	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
-	unsigned int temp, i;
-
-	/* Disable counter interrupts */
-	/* Don't disable all 8 counters (with the E bit) as they may be in use */
-	temp = readl(&emc->PMCR);
-	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
-		if (scu_em_used & (1 << i))
-			temp &= ~(1 << (i + 8));
-	}
-	writel(temp, &emc->PMCR);
-
-	/* Free counter interrupts and reset counters */
-	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
-		if (scu_em_used & (1 << i)) {
-			scu_reset_counter(emc, i);
-			free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
-		}
-	}
-}
-
-struct em_function_data {
-	int (*fn)(void);
-	int ret;
-};
-
-static void em_func(void *data)
-{
-	struct em_function_data *d = data;
-	int ret = d->fn();
-	if (ret)
-		d->ret = ret;
-}
-
-static int em_call_function(int (*fn)(void))
-{
-	struct em_function_data data;
-
-	data.fn = fn;
-	data.ret = 0;
-
-	preempt_disable();
-	smp_call_function(em_func, &data, 1);
-	em_func(&data);
-	preempt_enable();
-
-	return data.ret;
-}
-
-/*
- * Glue to stick the individual ARM11 PMUs and the SCU
- * into the oprofile framework.
- */
-static int em_setup_ctrs(void)
-{
-	int ret;
-
-	/* Configure CPU counters by cross-calling to the other CPUs */
-	ret = em_call_function(arm11_setup_pmu);
-	if (ret == 0)
-		scu_setup();
-
-	return 0;
-}
-
-static int em_start(void)
-{
-	int ret;
-
-	pmu_irqs = reserve_pmu();
-	if (IS_ERR(pmu_irqs)) {
-		ret = PTR_ERR(pmu_irqs);
-		goto out;
-	}
-
-	ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	if (ret == 0) {
-		em_call_function(arm11_start_pmu);
-
-		ret = scu_start();
-		if (ret) {
-			arm11_release_interrupts(pmu_irqs->irqs,
-						 pmu_irqs->num_irqs);
-		} else {
-			release_pmu(pmu_irqs);
-			pmu_irqs = NULL;
-		}
-	}
-
-out:
-	return ret;
-}
-
-static void em_stop(void)
-{
-	em_call_function(arm11_stop_pmu);
-	arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	scu_stop();
-	release_pmu(pmu_irqs);
-}
-
-/*
- * Why isn't there a function to route an IRQ to a specific CPU in
- * genirq?
- */
-static void em_route_irq(int irq, unsigned int cpu)
-{
-	struct irq_desc *desc = irq_desc + irq;
-	const struct cpumask *mask = cpumask_of(cpu);
-
-	spin_lock_irq(&desc->lock);
-	cpumask_copy(desc->affinity, mask);
-	desc->chip->set_affinity(irq, mask);
-	spin_unlock_irq(&desc->lock);
-}
-
-static int em_setup(void)
-{
-	/*
-	 * Send SCU PMU interrupts to the "owner" CPU.
-	 */
-	em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
-	em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
-	em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
-	em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
-	em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
-	em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
-	em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
-	em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
-
-	return init_pmu();
-}
-
-struct op_arm_model_spec op_mpcore_spec = {
-	.init		= em_setup,
-	.num_counters	= MPCORE_NUM_COUNTERS,
-	.setup_ctrs	= em_setup_ctrs,
-	.start		= em_start,
-	.stop		= em_stop,
-	.name		= "arm/mpcore",
-};
diff --git a/arch/arm/oprofile/op_model_mpcore.h b/arch/arm/oprofile/op_model_mpcore.h
deleted file mode 100644
index 73d811023688..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_MPCORE_H
-#define OP_MODEL_MPCORE_H
-
-struct eventmonitor {
-	unsigned long PMCR;
-	unsigned char MCEB[8];
-	unsigned long MC[8];
-};
-
-/*
- * List of userspace counter numbers: note that the structure is important.
- * The code relies on CPUn's counters being CPU0's counters + 3n
- * and on CPU0's counters starting at 0
- */
-
-#define COUNTER_CPU0_PMN0 0
-#define COUNTER_CPU0_PMN1 1
-#define COUNTER_CPU0_CCNT 2
-
-#define COUNTER_CPU1_PMN0 3
-#define COUNTER_CPU1_PMN1 4
-#define COUNTER_CPU1_CCNT 5
-
-#define COUNTER_CPU2_PMN0 6
-#define COUNTER_CPU2_PMN1 7
-#define COUNTER_CPU2_CCNT 8
-
-#define COUNTER_CPU3_PMN0 9
-#define COUNTER_CPU3_PMN1 10
-#define COUNTER_CPU3_CCNT 11
-
-#define COUNTER_SCU_MN0 12
-#define COUNTER_SCU_MN1 13
-#define COUNTER_SCU_MN2 14
-#define COUNTER_SCU_MN3 15
-#define COUNTER_SCU_MN4 16
-#define COUNTER_SCU_MN5 17
-#define COUNTER_SCU_MN6 18
-#define COUNTER_SCU_MN7 19
-#define NUM_SCU_COUNTERS 8
-
-#define SCU_COUNTER(number) ((number) + COUNTER_SCU_MN0)
-
-#define MPCORE_NUM_COUNTERS SCU_COUNTER(NUM_SCU_COUNTERS)
-
-#endif
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c
deleted file mode 100644
index a22357a2fd08..000000000000
--- a/arch/arm/oprofile/op_model_v6.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * @file op_model_v6.c
- * ARM11 Performance Monitor Driver
- *
- * Based on op_model_xscale.c
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Tony Lindgren <tony@atomide.com>
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv6_pmu_stop(void)
-{
-	arm11_stop_pmu();
-	arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	release_pmu(pmu_irqs);
-	pmu_irqs = NULL;
-}
-
-static int armv6_pmu_start(void)
-{
-	int ret;
-
-	pmu_irqs = reserve_pmu();
-	if (IS_ERR(pmu_irqs)) {
-		ret = PTR_ERR(pmu_irqs);
-		goto out;
-	}
-
-	ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	if (ret >= 0) {
-		ret = arm11_start_pmu();
-	} else {
-		release_pmu(pmu_irqs);
-		pmu_irqs = NULL;
-	}
-
-out:
-	return ret;
-}
-
-static int armv6_detect_pmu(void)
-{
-	return 0;
-}
-
-struct op_arm_model_spec op_armv6_spec = {
-	.init		= armv6_detect_pmu,
-	.num_counters	= 3,
-	.setup_ctrs	= arm11_setup_pmu,
-	.start		= armv6_pmu_start,
-	.stop		= armv6_pmu_stop,
-	.name		= "arm/armv6",
-};
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
deleted file mode 100644
index 8642d0891ae1..000000000000
--- a/arch/arm/oprofile/op_model_v7.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/**
- * op_model_v7.c
- * ARM V7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_v7.h"
-
-/* #define DEBUG */
-
-
-/*
- * ARM V7 PMNC support
- */
-
-static u32 cnt_en[CNTMAX];
-
-static inline void armv7_pmnc_write(u32 val)
-{
-	val &= PMNC_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
-}
-
-static inline u32 armv7_pmnc_read(void)
-{
-	u32 val;
-
-	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
-	return val;
-}
-
-static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
-{
-	u32 val;
-
-	if (cnt >= CNTMAX) {
-		printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), cnt);
-		return -1;
-	}
-
-	if (cnt == CCNT)
-		val = CNTENS_C;
-	else
-		val = (1 << (cnt - CNT0));
-
-	val &= CNTENS_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
-	return cnt;
-}
-
-static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
-{
-	u32 val;
-
-	if (cnt >= CNTMAX) {
-		printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), cnt);
-		return -1;
-	}
-
-	if (cnt == CCNT)
-		val = CNTENC_C;
-	else
-		val = (1 << (cnt - CNT0));
-
-	val &= CNTENC_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
-	return cnt;
-}
-
-static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
-{
-	u32 val;
-
-	if (cnt >= CNTMAX) {
-		printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), cnt);
-		return -1;
-	}
-
-	if (cnt == CCNT)
-		val = INTENS_C;
-	else
-		val = (1 << (cnt - CNT0));
-
-	val &= INTENS_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
-	return cnt;
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
-	u32 val;
-
-	/* Read */
-	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
-	/* Write to clear flags */
-	val &= FLAG_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
-	return val;
-}
-
-static inline int armv7_pmnc_select_counter(unsigned int cnt)
-{
-	u32 val;
-
-	if ((cnt == CCNT) || (cnt >= CNTMAX)) {
-		printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), cnt);
-		return -1;
-	}
-
-	val = (cnt - CNT0) & SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
-
-	return cnt;
-}
-
-static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
-{
-	if (armv7_pmnc_select_counter(cnt) == cnt) {
-		val &= EVTSEL_MASK;
-		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
-	}
-}
-
-static void armv7_pmnc_reset_counter(unsigned int cnt)
-{
-	u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-	u32 val = -(u32)counter_config[cpu_cnt].count;
-
-	switch (cnt) {
-	case CCNT:
-		armv7_pmnc_disable_counter(cnt);
-
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
-
-		if (cnt_en[cnt] != 0)
-			armv7_pmnc_enable_counter(cnt);
-
-		break;
-
-	case CNT0:
-	case CNT1:
-	case CNT2:
-	case CNT3:
-		armv7_pmnc_disable_counter(cnt);
-
-		if (armv7_pmnc_select_counter(cnt) == cnt)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
-
-		if (cnt_en[cnt] != 0)
-			armv7_pmnc_enable_counter(cnt);
-
-		break;
-
-	default:
-		printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
-			" %d\n", smp_processor_id(), cnt);
-		break;
-	}
-}
-
-int armv7_setup_pmnc(void)
-{
-	unsigned int cnt;
-
-	if (armv7_pmnc_read() & PMNC_E) {
-		printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
-			" new event counter.\n", smp_processor_id());
-		return -EBUSY;
-	}
-
-	/* Initialize & Reset PMNC: C bit and P bit */
-	armv7_pmnc_write(PMNC_P | PMNC_C);
-
-
-	for (cnt = CCNT; cnt < CNTMAX; cnt++) {
-		unsigned long event;
-		u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-
-		/*
-		 * Disable counter
-		 */
-		armv7_pmnc_disable_counter(cnt);
-		cnt_en[cnt] = 0;
-
-		if (!counter_config[cpu_cnt].enabled)
-			continue;
-
-		event = counter_config[cpu_cnt].event & 255;
-
-		/*
-		 * Set event (if destined for PMNx counters)
-		 * We don't need to set the event if it's a cycle count
-		 */
-		if (cnt != CCNT)
-			armv7_pmnc_write_evtsel(cnt, event);
-
-		/*
-		 * Enable interrupt for this counter
-		 */
-		armv7_pmnc_enable_intens(cnt);
-
-		/*
-		 * Reset counter
-		 */
-		armv7_pmnc_reset_counter(cnt);
-
-		/*
-		 * Enable counter
-		 */
-		armv7_pmnc_enable_counter(cnt);
-		cnt_en[cnt] = 1;
-	}
-
-	return 0;
-}
-
-static inline void armv7_start_pmnc(void)
-{
-	armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
-}
-
-static inline void armv7_stop_pmnc(void)
-{
-	armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
-{
-	struct pt_regs *regs = get_irq_regs();
-	unsigned int cnt;
-	u32 flags;
-
-
-	/*
-	 * Stop IRQ generation
-	 */
-	armv7_stop_pmnc();
-
-	/*
-	 * Get and reset overflow status flags
-	 */
-	flags = armv7_pmnc_getreset_flags();
-
-	/*
-	 * Cycle counter
-	 */
-	if (flags & FLAG_C) {
-		u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
-		armv7_pmnc_reset_counter(CCNT);
-		oprofile_add_sample(regs, cpu_cnt);
-	}
-
-	/*
-	 * PMNC counters 0:3
-	 */
-	for (cnt = CNT0; cnt < CNTMAX; cnt++) {
-		if (flags & (1 << (cnt - CNT0))) {
-			u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-			armv7_pmnc_reset_counter(cnt);
-			oprofile_add_sample(regs, cpu_cnt);
-		}
-	}
-
-	/*
-	 * Allow IRQ generation
-	 */
-	armv7_start_pmnc();
-
-	return IRQ_HANDLED;
-}
-
-int armv7_request_interrupts(const int *irqs, int nr)
-{
-	unsigned int i;
-	int ret = 0;
-
-	for (i = 0; i < nr; i++) {
-		ret = request_irq(irqs[i], armv7_pmnc_interrupt,
-				IRQF_DISABLED, "CP15 PMNC", NULL);
-		if (ret != 0) {
-			printk(KERN_ERR "oprofile: unable to request IRQ%u"
-				" for ARMv7\n",
-				irqs[i]);
-			break;
-		}
-	}
-
-	if (i != nr)
-		while (i-- != 0)
-			free_irq(irqs[i], NULL);
-
-	return ret;
-}
-
-void armv7_release_interrupts(const int *irqs, int nr)
-{
-	unsigned int i;
-
-	for (i = 0; i < nr; i++)
-		free_irq(irqs[i], NULL);
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
-{
-	u32 val;
-	unsigned int cnt;
-
-	printk(KERN_INFO "PMNC registers dump:\n");
-
-	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
-	printk(KERN_INFO "PMNC  =0x%08x\n", val);
-
-	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
-	printk(KERN_INFO "CNTENS=0x%08x\n", val);
-
-	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
-	printk(KERN_INFO "INTENS=0x%08x\n", val);
-
-	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-	printk(KERN_INFO "FLAGS =0x%08x\n", val);
-
-	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
-	printk(KERN_INFO "SELECT=0x%08x\n", val);
-
-	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
-	printk(KERN_INFO "CCNT  =0x%08x\n", val);
-
-	for (cnt = CNT0; cnt < CNTMAX; cnt++) {
-		armv7_pmnc_select_counter(cnt);
-		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
-		printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
-		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
-		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
-	}
-}
-#endif
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv7_pmnc_stop(void)
-{
-#ifdef DEBUG
-	armv7_pmnc_dump_regs();
-#endif
-	armv7_stop_pmnc();
-	armv7_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	release_pmu(pmu_irqs);
-	pmu_irqs = NULL;
-}
-
-static int armv7_pmnc_start(void)
-{
-	int ret;
-
-	pmu_irqs = reserve_pmu();
-	if (IS_ERR(pmu_irqs))
-		return PTR_ERR(pmu_irqs);
-
-#ifdef DEBUG
-	armv7_pmnc_dump_regs();
-#endif
-	ret = armv7_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
-	if (ret >= 0) {
-		armv7_start_pmnc();
-	} else {
-		release_pmu(pmu_irqs);
-		pmu_irqs = NULL;
-	}
-
-	return ret;
-}
-
-static int armv7_detect_pmnc(void)
-{
-	return 0;
-}
-
-struct op_arm_model_spec op_armv7_spec = {
-	.init		= armv7_detect_pmnc,
-	.num_counters	= 5,
-	.setup_ctrs	= armv7_setup_pmnc,
-	.start		= armv7_pmnc_start,
-	.stop		= armv7_pmnc_stop,
-	.name		= "arm/armv7",
-};
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
deleted file mode 100644
index 9ca334b39c75..000000000000
--- a/arch/arm/oprofile/op_model_v7.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * op_model_v7.h
- * ARM v7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- * Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * Copyright 2000-2004 MontaVista Software Inc
- * Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * Copyright 2004 Intel Corporation
- * Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * Copyright 2004 Oprofile Authors
- *
- * Read the file COPYING
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OP_MODEL_V7_H
-#define OP_MODEL_V7_H
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define PMNC_E		(1 << 0)	/* Enable all counters */
-#define PMNC_P		(1 << 1)	/* Reset all counters */
-#define PMNC_C		(1 << 2)	/* Cycle counter reset */
-#define PMNC_D		(1 << 3)	/* CCNT counts every 64th cpu cycle */
-#define PMNC_X		(1 << 4)	/* Export to ETM */
-#define PMNC_DP		(1 << 5)	/* Disable CCNT if non-invasive debug*/
-#define PMNC_MASK	0x3f		/* Mask for writable bits */
-
-/*
- * Available counters
- */
-#define CCNT		0
-#define CNT0		1
-#define CNT1		2
-#define CNT2		3
-#define CNT3		4
-#define CNTMAX		5
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * CNTMAX + (counter))
-
-/*
- * CNTENS: counters enable reg
- */
-#define CNTENS_P0	(1 << 0)
-#define CNTENS_P1	(1 << 1)
-#define CNTENS_P2	(1 << 2)
-#define CNTENS_P3	(1 << 3)
-#define CNTENS_C	(1 << 31)
-#define CNTENS_MASK	0x8000000f	/* Mask for writable bits */
-
-/*
- * CNTENC: counters disable reg
- */
-#define CNTENC_P0	(1 << 0)
-#define CNTENC_P1	(1 << 1)
-#define CNTENC_P2	(1 << 2)
-#define CNTENC_P3	(1 << 3)
-#define CNTENC_C	(1 << 31)
-#define CNTENC_MASK	0x8000000f	/* Mask for writable bits */
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define INTENS_P0	(1 << 0)
-#define INTENS_P1	(1 << 1)
-#define INTENS_P2	(1 << 2)
-#define INTENS_P3	(1 << 3)
-#define INTENS_C	(1 << 31)
-#define INTENS_MASK	0x8000000f	/* Mask for writable bits */
-
-/*
- * EVTSEL: Event selection reg
- */
-#define EVTSEL_MASK	0x7f		/* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define SELECT_MASK	0x1f		/* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define FLAG_P0		(1 << 0)
-#define FLAG_P1		(1 << 1)
-#define FLAG_P2		(1 << 2)
-#define FLAG_P3		(1 << 3)
-#define FLAG_C		(1 << 31)
-#define FLAG_MASK	0x8000000f	/* Mask for writable bits */
-
-
-int armv7_setup_pmu(void);
-int armv7_start_pmu(void);
-int armv7_stop_pmu(void);
-int armv7_request_interrupts(const int *, int);
-void armv7_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c
deleted file mode 100644
index 1d34a02048bd..000000000000
--- a/arch/arm/oprofile/op_model_xscale.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * @file op_model_xscale.c
- * XScale Performance Monitor Driver
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <asm/cputype.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-
-#define	PMU_ENABLE	0x001	/* Enable counters */
-#define PMN_RESET	0x002	/* Reset event counters */
-#define	CCNT_RESET	0x004	/* Reset clock counter */
-#define	PMU_RESET	(CCNT_RESET | PMN_RESET)
-#define PMU_CNT64	0x008	/* Make CCNT count every 64th cycle */
-
-/*
- * Different types of events that can be counted by the XScale PMU
- * as used by Oprofile userspace. Here primarily for documentation
- * purposes.
- */
-
-#define EVT_ICACHE_MISS			0x00
-#define	EVT_ICACHE_NO_DELIVER		0x01
-#define	EVT_DATA_STALL			0x02
-#define	EVT_ITLB_MISS			0x03
-#define	EVT_DTLB_MISS			0x04
-#define	EVT_BRANCH			0x05
-#define	EVT_BRANCH_MISS			0x06
-#define	EVT_INSTRUCTION			0x07
-#define	EVT_DCACHE_FULL_STALL		0x08
-#define	EVT_DCACHE_FULL_STALL_CONTIG	0x09
-#define	EVT_DCACHE_ACCESS		0x0A
-#define	EVT_DCACHE_MISS			0x0B
-#define	EVT_DCACE_WRITE_BACK		0x0C
-#define	EVT_PC_CHANGED			0x0D
-#define	EVT_BCU_REQUEST			0x10
-#define	EVT_BCU_FULL			0x11
-#define	EVT_BCU_DRAIN			0x12
-#define	EVT_BCU_ECC_NO_ELOG		0x14
-#define	EVT_BCU_1_BIT_ERR		0x15
-#define	EVT_RMW				0x16
-/* EVT_CCNT is not hardware defined */
-#define EVT_CCNT			0xFE
-#define EVT_UNUSED			0xFF
-
-struct pmu_counter {
-	volatile unsigned long ovf;
-	unsigned long reset_counter;
-};
-
-enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };
-
-static struct pmu_counter results[MAX_COUNTERS];
-
-/*
- * There are two versions of the PMU in current XScale processors
- * with differing register layouts and number of performance counters.
- * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
- * We detect which register layout to use in xscale_detect_pmu()
- */
-enum { PMU_XSC1, PMU_XSC2 };
-
-struct pmu_type {
-	int id;
-	char *name;
-	int num_counters;
-	unsigned int int_enable;
-	unsigned int cnt_ovf[MAX_COUNTERS];
-	unsigned int int_mask[MAX_COUNTERS];
-};
-
-static struct pmu_type pmu_parms[] = {
-	{
-		.id		= PMU_XSC1,
-		.name		= "arm/xscale1",
-		.num_counters	= 3,
-		.int_mask	= { [PMN0] = 0x10, [PMN1] = 0x20,
-				    [CCNT] = 0x40 },
-		.cnt_ovf	= { [CCNT] = 0x400, [PMN0] = 0x100,
-				    [PMN1] = 0x200},
-	},
-	{
-		.id		= PMU_XSC2,
-		.name		= "arm/xscale2",
-		.num_counters	= 5,
-		.int_mask	= { [CCNT] = 0x01, [PMN0] = 0x02,
-				    [PMN1] = 0x04, [PMN2] = 0x08,
-				    [PMN3] = 0x10 },
-		.cnt_ovf	= { [CCNT] = 0x01, [PMN0] = 0x02,
-				    [PMN1] = 0x04, [PMN2] = 0x08,
-				    [PMN3] = 0x10 },
-	},
-};
-
-static struct pmu_type *pmu;
-
-static void write_pmnc(u32 val)
-{
-	if (pmu->id == PMU_XSC1) {
-		/* upper 4bits and 7, 11 are write-as-0 */
-		val &= 0xffff77f;
-		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
-	} else {
-		/* bits 4-23 are write-as-0, 24-31 are write ignored */
-		val &= 0xf;
-		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
-	}
-}
-
-static u32 read_pmnc(void)
-{
-	u32 val;
-
-	if (pmu->id == PMU_XSC1)
-		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
-	else {
-		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
-		/* bits 1-2 and 4-23 are read-unpredictable */
-		val &= 0xff000009;
-	}
-
-	return val;
-}
-
-static u32 __xsc1_read_counter(int counter)
-{
-	u32 val = 0;
-
-	switch (counter) {
-	case CCNT:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
-		break;
-	case PMN0:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
-		break;
-	case PMN1:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
-		break;
-	}
-	return val;
-}
-
-static u32 __xsc2_read_counter(int counter)
-{
-	u32 val = 0;
-
-	switch (counter) {
-	case CCNT:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
-		break;
-	case PMN0:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
-		break;
-	case PMN1:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
-		break;
-	case PMN2:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
-		break;
-	case PMN3:
-		__asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
-		break;
-	}
-	return val;
-}
-
-static u32 read_counter(int counter)
-{
-	u32 val;
-
-	if (pmu->id == PMU_XSC1)
-		val = __xsc1_read_counter(counter);
-	else
-		val = __xsc2_read_counter(counter);
-
-	return val;
-}
-
-static void __xsc1_write_counter(int counter, u32 val)
-{
-	switch (counter) {
-	case CCNT:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
-		break;
-	case PMN0:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
-		break;
-	case PMN1:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
-		break;
-	}
-}
-
-static void __xsc2_write_counter(int counter, u32 val)
-{
-	switch (counter) {
-	case CCNT:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
-		break;
-	case PMN0:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
-		break;
-	case PMN1:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
-		break;
-	case PMN2:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
-		break;
-	case PMN3:
-		__asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
-		break;
-	}
-}
-
-static void write_counter(int counter, u32 val)
-{
-	if (pmu->id == PMU_XSC1)
-		__xsc1_write_counter(counter, val);
-	else
-		__xsc2_write_counter(counter, val);
-}
-
-static int xscale_setup_ctrs(void)
-{
-	u32 evtsel, pmnc;
-	int i;
-
-	for (i = CCNT; i < MAX_COUNTERS; i++) {
-		if (counter_config[i].enabled)
-			continue;
-
-		counter_config[i].event = EVT_UNUSED;
-	}
-
-	switch (pmu->id) {
-	case PMU_XSC1:
-		pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
-		pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
-		write_pmnc(pmnc);
-		break;
-
-	case PMU_XSC2:
-		evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
-			(counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
-
-		pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
-		__asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
-		break;
-	}
-
-	for (i = CCNT; i < MAX_COUNTERS; i++) {
-		if (counter_config[i].event == EVT_UNUSED) {
-			counter_config[i].event = 0;
-			pmu->int_enable &= ~pmu->int_mask[i];
-			continue;
-		}
-
-		results[i].reset_counter = counter_config[i].count;
-		write_counter(i, -(u32)counter_config[i].count);
-		pmu->int_enable |= pmu->int_mask[i];
-		pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
-			 read_counter(i), counter_config[i].count);
-	}
-
-	return 0;
-}
-
-static void inline __xsc1_check_ctrs(void)
-{
-	int i;
-	u32 pmnc = read_pmnc();
-
-	/* NOTE: there's an A stepping errata that states if an overflow */
-	/*       bit already exists and another occurs, the previous     */
-	/*       Overflow bit gets cleared. There's no workaround.	  */
-	/*	 Fixed in B stepping or later			 	  */
-
-	/* Write the value back to clear the overflow flags. Overflow */
-	/* flags remain in pmnc for use below */
-	write_pmnc(pmnc & ~PMU_ENABLE);
-
-	for (i = CCNT; i <= PMN1; i++) {
-		if (!(pmu->int_mask[i] & pmu->int_enable))
-			continue;
-
-		if (pmnc & pmu->cnt_ovf[i])
-			results[i].ovf++;
-	}
-}
-
-static void inline __xsc2_check_ctrs(void)
-{
-	int i;
-	u32 flag = 0, pmnc = read_pmnc();
-
-	pmnc &= ~PMU_ENABLE;
-	write_pmnc(pmnc);
-
-	/* read overflow flag register */
-	__asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
-
-	for (i = CCNT; i <= PMN3; i++) {
-		if (!(pmu->int_mask[i] & pmu->int_enable))
-			continue;
-
-		if (flag & pmu->cnt_ovf[i])
-			results[i].ovf++;
-	}
-
-	/* writeback clears overflow bits */
-	__asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
-}
-
-static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
-{
-	int i;
-	u32 pmnc;
-
-	if (pmu->id == PMU_XSC1)
-		__xsc1_check_ctrs();
-	else
-		__xsc2_check_ctrs();
-
-	for (i = CCNT; i < MAX_COUNTERS; i++) {
-		if (!results[i].ovf)
-			continue;
-
-		write_counter(i, -(u32)results[i].reset_counter);
-		oprofile_add_sample(get_irq_regs(), i);
-		results[i].ovf--;
-	}
-
-	pmnc = read_pmnc() | PMU_ENABLE;
-	write_pmnc(pmnc);
-
-	return IRQ_HANDLED;
-}
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void xscale_pmu_stop(void)
-{
-	u32 pmnc = read_pmnc();
-
-	pmnc &= ~PMU_ENABLE;
-	write_pmnc(pmnc);
-
-	free_irq(pmu_irqs->irqs[0], results);
-	release_pmu(pmu_irqs);
-	pmu_irqs = NULL;
-}
-
-static int xscale_pmu_start(void)
-{
-	int ret;
-	u32 pmnc;
-
-	pmu_irqs = reserve_pmu();
-	if (IS_ERR(pmu_irqs))
-		return PTR_ERR(pmu_irqs);
-
-	pmnc = read_pmnc();
-
-	ret = request_irq(pmu_irqs->irqs[0], xscale_pmu_interrupt,
-			  IRQF_DISABLED, "XScale PMU", (void *)results);
-
-	if (ret < 0) {
-		printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
-		       pmu_irqs->irqs[0]);
-		release_pmu(pmu_irqs);
-		pmu_irqs = NULL;
-		return ret;
-	}
-
-	if (pmu->id == PMU_XSC1)
-		pmnc |= pmu->int_enable;
-	else {
-		__asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
-		pmnc &= ~PMU_CNT64;
-	}
-
-	pmnc |= PMU_ENABLE;
-	write_pmnc(pmnc);
-	pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
-	return 0;
-}
-
-static int xscale_detect_pmu(void)
-{
-	int ret = 0;
-	u32 id;
-
-	id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
-
-	switch (id) {
-	case 1:
-		pmu = &pmu_parms[PMU_XSC1];
-		break;
-	case 2:
-		pmu = &pmu_parms[PMU_XSC2];
-		break;
-	default:
-		ret = -ENODEV;
-		break;
-	}
-
-	if (!ret) {
-		op_xscale_spec.name = pmu->name;
-		op_xscale_spec.num_counters = pmu->num_counters;
-		pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
-	}
-
-	return ret;
-}
-
-struct op_arm_model_spec op_xscale_spec = {
-	.init		= xscale_detect_pmu,
-	.setup_ctrs	= xscale_setup_ctrs,
-	.start		= xscale_pmu_start,
-	.stop		= xscale_pmu_stop,
-};
-