Diffstat (limited to 'arch/arm/oprofile')
-rw-r--r--  arch/arm/oprofile/Makefile                |    7
-rw-r--r--  arch/arm/oprofile/backtrace.c             |   83
-rw-r--r--  arch/arm/oprofile/common.c                |  375
-rw-r--r--  arch/arm/oprofile/op_arm_model.h          |   35
-rw-r--r--  arch/arm/oprofile/op_counter.h            |   27
-rw-r--r--  arch/arm/oprofile/op_model_arm11_core.c   |  162
-rw-r--r--  arch/arm/oprofile/op_model_arm11_core.h   |   45
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c       |  306
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.h       |   61
-rw-r--r--  arch/arm/oprofile/op_model_v6.c           |   78
-rw-r--r--  arch/arm/oprofile/op_model_v7.c           |  415
-rw-r--r--  arch/arm/oprofile/op_model_v7.h           |  103
-rw-r--r--  arch/arm/oprofile/op_model_xscale.c       |  444
13 files changed, 310 insertions, 1831 deletions
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index 88e31f549f50..e666eafed152 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,9 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y				:= $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_CPU_XSCALE)		+= op_model_xscale.o
-oprofile-$(CONFIG_OPROFILE_ARM11_CORE)	+= op_model_arm11_core.o
-oprofile-$(CONFIG_OPROFILE_ARMV6)	+= op_model_v6.o
-oprofile-$(CONFIG_OPROFILE_MPCORE)	+= op_model_mpcore.o
-oprofile-$(CONFIG_OPROFILE_ARMV7)	+= op_model_v7.o
+oprofile-y				:= $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
deleted file mode 100644
index d805a52b5032..000000000000
--- a/arch/arm/oprofile/backtrace.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Arm specific backtracing code for oprofile
3 *
4 * Copyright 2005 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * Based on i386 oprofile backtrace code by John Levon, David Smith
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/oprofile.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/uaccess.h>
20#include <asm/ptrace.h>
21#include <asm/stacktrace.h>
22
23static int report_trace(struct stackframe *frame, void *d)
24{
25 unsigned int *depth = d;
26
27 if (*depth) {
28 oprofile_add_trace(frame->pc);
29 (*depth)--;
30 }
31
32 return *depth == 0;
33}
34
35/*
36 * The registers we're interested in are at the end of the variable
37 * length saved register structure. The fp points at the end of this
38 * structure so the address of this struct is:
39 * (struct frame_tail *)(xxx->fp)-1
40 */
41struct frame_tail {
42 struct frame_tail *fp;
43 unsigned long sp;
44 unsigned long lr;
45} __attribute__((packed));
46
47static struct frame_tail* user_backtrace(struct frame_tail *tail)
48{
49 struct frame_tail buftail[2];
50
51 /* Also check accessibility of one struct frame_tail beyond */
52 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
53 return NULL;
54 if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
55 return NULL;
56
57 oprofile_add_trace(buftail[0].lr);
58
59 /* frame pointers should strictly progress back up the stack
60 * (towards higher addresses) */
61 if (tail >= buftail[0].fp)
62 return NULL;
63
64 return buftail[0].fp-1;
65}
66
67void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
68{
69 struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
70
71 if (!user_mode(regs)) {
72 struct stackframe frame;
73 frame.fp = regs->ARM_fp;
74 frame.sp = regs->ARM_sp;
75 frame.lr = regs->ARM_lr;
76 frame.pc = regs->ARM_pc;
77 walk_stackframe(&frame, report_trace, &depth);
78 return;
79 }
80
81 while (depth-- && tail && !((unsigned long) tail & 3))
82 tail = user_backtrace(tail);
83}
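
The unwinder above reappears verbatim in common.c later in this patch. It relies on the APCS convention that the saved {fp, sp, lr} record sits immediately below the address held in the frame pointer, which is why it steps with (struct frame_tail *)fp - 1 and insists that successive frame pointers strictly increase. A stand-alone user-space sketch of that walk (the two-frame chain is fabricated purely for illustration, not taken from the kernel):

#include <stdio.h>

/* Same layout as the kernel's struct frame_tail above. */
struct frame_tail {
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

int main(void)
{
	/* Fabricated two-frame chain; frames[1] is the older (outer) frame. */
	struct frame_tail frames[2];

	frames[1].fp = NULL;            /* outermost frame: chain ends */
	frames[1].sp = 0;
	frames[1].lr = 0xdeadbeef;      /* pretend return address */
	frames[0].fp = &frames[1] + 1;  /* fp points just past the saved record */
	frames[0].sp = 0;
	frames[0].lr = 0xcafebabe;

	struct frame_tail *tail = &frames[0];
	unsigned int depth = 8;

	while (depth-- && tail) {
		printf("lr = 0x%lx\n", tail->lr);
		/* frame pointers must strictly progress up the stack */
		if (tail >= tail->fp)
			break;
		tail = tail->fp - 1;    /* the (struct frame_tail *)fp - 1 step */
	}
	return 0;
}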
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 3fcd752d6146..0691176899ff 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -2,32 +2,184 @@
2 * @file common.c 2 * @file common.c
3 * 3 *
4 * @remark Copyright 2004 Oprofile Authors 4 * @remark Copyright 2004 Oprofile Authors
5 * @remark Copyright 2010 ARM Ltd.
5 * @remark Read the file COPYING 6 * @remark Read the file COPYING
6 * 7 *
7 * @author Zwane Mwaikambo 8 * @author Zwane Mwaikambo
9 * @author Will Deacon [move to perf]
8 */ 10 */
9 11
12#include <linux/cpumask.h>
13#include <linux/err.h>
14#include <linux/errno.h>
10#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/mutex.h>
11#include <linux/oprofile.h> 17#include <linux/oprofile.h>
12#include <linux/errno.h> 18#include <linux/perf_event.h>
19#include <linux/platform_device.h>
13#include <linux/slab.h> 20#include <linux/slab.h>
14#include <linux/sysdev.h> 21#include <asm/stacktrace.h>
15#include <linux/mutex.h> 22#include <linux/uaccess.h>
16 23
17#include "op_counter.h" 24#include <asm/perf_event.h>
18#include "op_arm_model.h" 25#include <asm/ptrace.h>
26
27#ifdef CONFIG_HW_PERF_EVENTS
28/*
29 * Per performance monitor configuration as set via oprofilefs.
30 */
31struct op_counter_config {
32 unsigned long count;
33 unsigned long enabled;
34 unsigned long event;
35 unsigned long unit_mask;
36 unsigned long kernel;
37 unsigned long user;
38 struct perf_event_attr attr;
39};
19 40
20static struct op_arm_model_spec *op_arm_model;
21static int op_arm_enabled; 41static int op_arm_enabled;
22static DEFINE_MUTEX(op_arm_mutex); 42static DEFINE_MUTEX(op_arm_mutex);
23 43
24struct op_counter_config *counter_config; 44static struct op_counter_config *counter_config;
45static struct perf_event **perf_events[nr_cpumask_bits];
46static int perf_num_counters;
47
48/*
49 * Overflow callback for oprofile.
50 */
51static void op_overflow_handler(struct perf_event *event, int unused,
52 struct perf_sample_data *data, struct pt_regs *regs)
53{
54 int id;
55 u32 cpu = smp_processor_id();
56
57 for (id = 0; id < perf_num_counters; ++id)
58 if (perf_events[cpu][id] == event)
59 break;
60
61 if (id != perf_num_counters)
62 oprofile_add_sample(regs, id);
63 else
64 pr_warning("oprofile: ignoring spurious overflow "
65 "on cpu %u\n", cpu);
66}
67
68/*
69 * Called by op_arm_setup to create perf attributes to mirror the oprofile
70 * settings in counter_config. Attributes are created as `pinned' events and
71 * so are permanently scheduled on the PMU.
72 */
73static void op_perf_setup(void)
74{
75 int i;
76 u32 size = sizeof(struct perf_event_attr);
77 struct perf_event_attr *attr;
78
79 for (i = 0; i < perf_num_counters; ++i) {
80 attr = &counter_config[i].attr;
81 memset(attr, 0, size);
82 attr->type = PERF_TYPE_RAW;
83 attr->size = size;
84 attr->config = counter_config[i].event;
85 attr->sample_period = counter_config[i].count;
86 attr->pinned = 1;
87 }
88}
89
90static int op_create_counter(int cpu, int event)
91{
92 int ret = 0;
93 struct perf_event *pevent;
94
95 if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
96 return ret;
97
98 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
99 cpu, -1,
100 op_overflow_handler);
101
102 if (IS_ERR(pevent)) {
103 ret = PTR_ERR(pevent);
104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
105 pr_warning("oprofile: failed to enable event %d "
106 "on CPU %d\n", event, cpu);
107 ret = -EBUSY;
108 } else {
109 perf_events[cpu][event] = pevent;
110 }
111
112 return ret;
113}
114
115static void op_destroy_counter(int cpu, int event)
116{
117 struct perf_event *pevent = perf_events[cpu][event];
118
119 if (pevent) {
120 perf_event_release_kernel(pevent);
121 perf_events[cpu][event] = NULL;
122 }
123}
124
125/*
126 * Called by op_arm_start to create active perf events based on the
 127 * previously configured attributes.
128 */
129static int op_perf_start(void)
130{
131 int cpu, event, ret = 0;
132
133 for_each_online_cpu(cpu) {
134 for (event = 0; event < perf_num_counters; ++event) {
135 ret = op_create_counter(cpu, event);
136 if (ret)
137 goto out;
138 }
139 }
140
141out:
142 return ret;
143}
144
145/*
146 * Called by op_arm_stop at the end of a profiling run.
147 */
148static void op_perf_stop(void)
149{
150 int cpu, event;
151
152 for_each_online_cpu(cpu)
153 for (event = 0; event < perf_num_counters; ++event)
154 op_destroy_counter(cpu, event);
155}
156
157
158static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
159{
160 switch (id) {
161 case ARM_PERF_PMU_ID_XSCALE1:
162 return "arm/xscale1";
163 case ARM_PERF_PMU_ID_XSCALE2:
164 return "arm/xscale2";
165 case ARM_PERF_PMU_ID_V6:
166 return "arm/armv6";
167 case ARM_PERF_PMU_ID_V6MP:
168 return "arm/mpcore";
169 case ARM_PERF_PMU_ID_CA8:
170 return "arm/armv7";
171 case ARM_PERF_PMU_ID_CA9:
172 return "arm/armv7-ca9";
173 default:
174 return NULL;
175 }
176}
25 177
26static int op_arm_create_files(struct super_block *sb, struct dentry *root) 178static int op_arm_create_files(struct super_block *sb, struct dentry *root)
27{ 179{
28 unsigned int i; 180 unsigned int i;
29 181
30 for (i = 0; i < op_arm_model->num_counters; i++) { 182 for (i = 0; i < perf_num_counters; i++) {
31 struct dentry *dir; 183 struct dentry *dir;
32 char buf[4]; 184 char buf[4];
33 185
@@ -46,12 +198,10 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
46 198
47static int op_arm_setup(void) 199static int op_arm_setup(void)
48{ 200{
49 int ret;
50
51 spin_lock(&oprofilefs_lock); 201 spin_lock(&oprofilefs_lock);
52 ret = op_arm_model->setup_ctrs(); 202 op_perf_setup();
53 spin_unlock(&oprofilefs_lock); 203 spin_unlock(&oprofilefs_lock);
54 return ret; 204 return 0;
55} 205}
56 206
57static int op_arm_start(void) 207static int op_arm_start(void)
@@ -60,8 +210,9 @@ static int op_arm_start(void)
60 210
61 mutex_lock(&op_arm_mutex); 211 mutex_lock(&op_arm_mutex);
62 if (!op_arm_enabled) { 212 if (!op_arm_enabled) {
63 ret = op_arm_model->start(); 213 ret = 0;
64 op_arm_enabled = !ret; 214 op_perf_start();
215 op_arm_enabled = 1;
65 } 216 }
66 mutex_unlock(&op_arm_mutex); 217 mutex_unlock(&op_arm_mutex);
67 return ret; 218 return ret;
@@ -71,113 +222,205 @@ static void op_arm_stop(void)
71{ 222{
72 mutex_lock(&op_arm_mutex); 223 mutex_lock(&op_arm_mutex);
73 if (op_arm_enabled) 224 if (op_arm_enabled)
74 op_arm_model->stop(); 225 op_perf_stop();
75 op_arm_enabled = 0; 226 op_arm_enabled = 0;
76 mutex_unlock(&op_arm_mutex); 227 mutex_unlock(&op_arm_mutex);
77} 228}
78 229
79#ifdef CONFIG_PM 230#ifdef CONFIG_PM
80static int op_arm_suspend(struct sys_device *dev, pm_message_t state) 231static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
81{ 232{
82 mutex_lock(&op_arm_mutex); 233 mutex_lock(&op_arm_mutex);
83 if (op_arm_enabled) 234 if (op_arm_enabled)
84 op_arm_model->stop(); 235 op_perf_stop();
85 mutex_unlock(&op_arm_mutex); 236 mutex_unlock(&op_arm_mutex);
86 return 0; 237 return 0;
87} 238}
88 239
89static int op_arm_resume(struct sys_device *dev) 240static int op_arm_resume(struct platform_device *dev)
90{ 241{
91 mutex_lock(&op_arm_mutex); 242 mutex_lock(&op_arm_mutex);
92 if (op_arm_enabled && op_arm_model->start()) 243 if (op_arm_enabled && op_perf_start())
93 op_arm_enabled = 0; 244 op_arm_enabled = 0;
94 mutex_unlock(&op_arm_mutex); 245 mutex_unlock(&op_arm_mutex);
95 return 0; 246 return 0;
96} 247}
97 248
98static struct sysdev_class oprofile_sysclass = { 249static struct platform_driver oprofile_driver = {
99 .name = "oprofile", 250 .driver = {
251 .name = "arm-oprofile",
252 },
100 .resume = op_arm_resume, 253 .resume = op_arm_resume,
101 .suspend = op_arm_suspend, 254 .suspend = op_arm_suspend,
102}; 255};
103 256
104static struct sys_device device_oprofile = { 257static struct platform_device *oprofile_pdev;
105 .id = 0,
106 .cls = &oprofile_sysclass,
107};
108 258
109static int __init init_driverfs(void) 259static int __init init_driverfs(void)
110{ 260{
111 int ret; 261 int ret;
112 262
113 if (!(ret = sysdev_class_register(&oprofile_sysclass))) 263 ret = platform_driver_register(&oprofile_driver);
114 ret = sysdev_register(&device_oprofile); 264 if (ret)
265 goto out;
115 266
267 oprofile_pdev = platform_device_register_simple(
268 oprofile_driver.driver.name, 0, NULL, 0);
269 if (IS_ERR(oprofile_pdev)) {
270 ret = PTR_ERR(oprofile_pdev);
271 platform_driver_unregister(&oprofile_driver);
272 }
273
274out:
116 return ret; 275 return ret;
117} 276}
118 277
119static void exit_driverfs(void) 278static void exit_driverfs(void)
120{ 279{
121 sysdev_unregister(&device_oprofile); 280 platform_device_unregister(oprofile_pdev);
122 sysdev_class_unregister(&oprofile_sysclass); 281 platform_driver_unregister(&oprofile_driver);
123} 282}
124#else 283#else
125#define init_driverfs() do { } while (0) 284static int __init init_driverfs(void) { return 0; }
126#define exit_driverfs() do { } while (0) 285#define exit_driverfs() do { } while (0)
127#endif /* CONFIG_PM */ 286#endif /* CONFIG_PM */
128 287
129int __init oprofile_arch_init(struct oprofile_operations *ops) 288static int report_trace(struct stackframe *frame, void *d)
130{ 289{
131 struct op_arm_model_spec *spec = NULL; 290 unsigned int *depth = d;
132 int ret = -ENODEV;
133 291
134 ops->backtrace = arm_backtrace; 292 if (*depth) {
293 oprofile_add_trace(frame->pc);
294 (*depth)--;
295 }
135 296
136#ifdef CONFIG_CPU_XSCALE 297 return *depth == 0;
137 spec = &op_xscale_spec; 298}
138#endif
139 299
140#ifdef CONFIG_OPROFILE_ARMV6 300/*
141 spec = &op_armv6_spec; 301 * The registers we're interested in are at the end of the variable
142#endif 302 * length saved register structure. The fp points at the end of this
303 * structure so the address of this struct is:
304 * (struct frame_tail *)(xxx->fp)-1
305 */
306struct frame_tail {
307 struct frame_tail *fp;
308 unsigned long sp;
309 unsigned long lr;
310} __attribute__((packed));
143 311
144#ifdef CONFIG_OPROFILE_MPCORE 312static struct frame_tail* user_backtrace(struct frame_tail *tail)
145 spec = &op_mpcore_spec; 313{
146#endif 314 struct frame_tail buftail[2];
147 315
148#ifdef CONFIG_OPROFILE_ARMV7 316 /* Also check accessibility of one struct frame_tail beyond */
149 spec = &op_armv7_spec; 317 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
150#endif 318 return NULL;
319 if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
320 return NULL;
151 321
152 if (spec) { 322 oprofile_add_trace(buftail[0].lr);
153 ret = spec->init();
154 if (ret < 0)
155 return ret;
156 323
157 counter_config = kcalloc(spec->num_counters, sizeof(struct op_counter_config), 324 /* frame pointers should strictly progress back up the stack
158 GFP_KERNEL); 325 * (towards higher addresses) */
159 if (!counter_config) 326 if (tail >= buftail[0].fp)
160 return -ENOMEM; 327 return NULL;
161 328
162 op_arm_model = spec; 329 return buftail[0].fp-1;
163 init_driverfs(); 330}
164 ops->create_files = op_arm_create_files; 331
165 ops->setup = op_arm_setup; 332static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
166 ops->shutdown = op_arm_stop; 333{
167 ops->start = op_arm_start; 334 struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
168 ops->stop = op_arm_stop; 335
169 ops->cpu_type = op_arm_model->name; 336 if (!user_mode(regs)) {
170 printk(KERN_INFO "oprofile: using %s\n", spec->name); 337 struct stackframe frame;
338 frame.fp = regs->ARM_fp;
339 frame.sp = regs->ARM_sp;
340 frame.lr = regs->ARM_lr;
341 frame.pc = regs->ARM_pc;
342 walk_stackframe(&frame, report_trace, &depth);
343 return;
171 } 344 }
172 345
346 while (depth-- && tail && !((unsigned long) tail & 3))
347 tail = user_backtrace(tail);
348}
349
350int __init oprofile_arch_init(struct oprofile_operations *ops)
351{
352 int cpu, ret = 0;
353
354 perf_num_counters = armpmu_get_max_events();
355
356 counter_config = kcalloc(perf_num_counters,
357 sizeof(struct op_counter_config), GFP_KERNEL);
358
359 if (!counter_config) {
360 pr_info("oprofile: failed to allocate %d "
361 "counters\n", perf_num_counters);
362 return -ENOMEM;
363 }
364
365 ret = init_driverfs();
366 if (ret) {
367 kfree(counter_config);
368 return ret;
369 }
370
371 for_each_possible_cpu(cpu) {
372 perf_events[cpu] = kcalloc(perf_num_counters,
373 sizeof(struct perf_event *), GFP_KERNEL);
374 if (!perf_events[cpu]) {
375 pr_info("oprofile: failed to allocate %d perf events "
376 "for cpu %d\n", perf_num_counters, cpu);
377 while (--cpu >= 0)
378 kfree(perf_events[cpu]);
379 return -ENOMEM;
380 }
381 }
382
383 ops->backtrace = arm_backtrace;
384 ops->create_files = op_arm_create_files;
385 ops->setup = op_arm_setup;
386 ops->start = op_arm_start;
387 ops->stop = op_arm_stop;
388 ops->shutdown = op_arm_stop;
389 ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
390
391 if (!ops->cpu_type)
392 ret = -ENODEV;
393 else
394 pr_info("oprofile: using %s\n", ops->cpu_type);
395
173 return ret; 396 return ret;
174} 397}
175 398
176void oprofile_arch_exit(void) 399void oprofile_arch_exit(void)
177{ 400{
178 if (op_arm_model) { 401 int cpu, id;
402 struct perf_event *event;
403
404 if (*perf_events) {
179 exit_driverfs(); 405 exit_driverfs();
180 op_arm_model = NULL; 406 for_each_possible_cpu(cpu) {
407 for (id = 0; id < perf_num_counters; ++id) {
408 event = perf_events[cpu][id];
409 if (event != NULL)
410 perf_event_release_kernel(event);
411 }
412 kfree(perf_events[cpu]);
413 }
181 } 414 }
182 kfree(counter_config); 415
416 if (counter_config)
417 kfree(counter_config);
418}
419#else
420int __init oprofile_arch_init(struct oprofile_operations *ops)
421{
422 pr_info("oprofile: hardware counters not available\n");
423 return -ENODEV;
183} 424}
425void oprofile_arch_exit(void) {}
426#endif /* CONFIG_HW_PERF_EVENTS */
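
The replacement backend above drives the PMU entirely through in-kernel perf events: each enabled oprofile counter becomes a pinned PERF_TYPE_RAW event whose overflow callback hands the sample to oprofile. A minimal sketch of that pattern, using the 2.6.34-era API exactly as it appears in this diff (the demo_* names, raw event number and period are placeholders, not part of the patch):

#include <linux/perf_event.h>
#include <linux/string.h>

/* Overflow callback signature as used by op_overflow_handler() above. */
static void demo_overflow(struct perf_event *event, int unused,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	/* one sample period has elapsed; record the sample here */
}

static struct perf_event *demo_create_counter(int cpu, u64 raw_event,
					      u64 period)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type          = PERF_TYPE_RAW;	/* raw PMU event number */
	attr.size          = sizeof(attr);
	attr.config        = raw_event;
	attr.sample_period = period;
	attr.pinned        = 1;			/* keep it scheduled on the PMU */

	/* bind to one CPU, no task context (-1), overflow handler as above */
	return perf_event_create_kernel_counter(&attr, cpu, -1, demo_overflow);
}

As op_create_counter() above does, a caller should treat an event that is not in PERF_EVENT_STATE_ACTIVE after creation as a scheduling failure (-EBUSY), since a pinned event that cannot get the PMU will not count.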
diff --git a/arch/arm/oprofile/op_arm_model.h b/arch/arm/oprofile/op_arm_model.h
deleted file mode 100644
index 8c4e4f6a1de3..000000000000
--- a/arch/arm/oprofile/op_arm_model.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/**
2 * @file op_arm_model.h
3 * interface to ARM machine specific operations
4 *
5 * @remark Copyright 2004 Oprofile Authors
6 * @remark Read the file COPYING
7 *
8 * @author Zwane Mwaikambo
9 */
10
11#ifndef OP_ARM_MODEL_H
12#define OP_ARM_MODEL_H
13
14struct op_arm_model_spec {
15 int (*init)(void);
16 unsigned int num_counters;
17 int (*setup_ctrs)(void);
18 int (*start)(void);
19 void (*stop)(void);
20 char *name;
21};
22
23#ifdef CONFIG_CPU_XSCALE
24extern struct op_arm_model_spec op_xscale_spec;
25#endif
26
27extern struct op_arm_model_spec op_armv6_spec;
28extern struct op_arm_model_spec op_mpcore_spec;
29extern struct op_arm_model_spec op_armv7_spec;
30
31extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
32
33extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
34extern void op_arm_exit(void);
35#endif /* OP_ARM_MODEL_H */
diff --git a/arch/arm/oprofile/op_counter.h b/arch/arm/oprofile/op_counter.h
deleted file mode 100644
index ca942a63b52f..000000000000
--- a/arch/arm/oprofile/op_counter.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/**
2 * @file op_counter.h
3 *
4 * @remark Copyright 2004 Oprofile Authors
5 * @remark Read the file COPYING
6 *
7 * @author Zwane Mwaikambo
8 */
9
10#ifndef OP_COUNTER_H
11#define OP_COUNTER_H
12
13/* Per performance monitor configuration as set via
14 * oprofilefs.
15 */
16struct op_counter_config {
17 unsigned long count;
18 unsigned long enabled;
19 unsigned long event;
20 unsigned long unit_mask;
21 unsigned long kernel;
22 unsigned long user;
23};
24
25extern struct op_counter_config *counter_config;
26
27#endif /* OP_COUNTER_H */
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c
deleted file mode 100644
index ef3e2653b90c..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.c
+++ /dev/null
@@ -1,162 +0,0 @@
1/**
2 * @file op_model_arm11_core.c
3 * ARM11 Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 */
6#include <linux/types.h>
7#include <linux/errno.h>
8#include <linux/oprofile.h>
9#include <linux/interrupt.h>
10#include <linux/irq.h>
11#include <linux/smp.h>
12
13#include "op_counter.h"
14#include "op_arm_model.h"
15#include "op_model_arm11_core.h"
16
17/*
18 * ARM11 PMU support
19 */
20static inline void arm11_write_pmnc(u32 val)
21{
22 /* upper 4bits and 7, 11 are write-as-0 */
23 val &= 0x0ffff77f;
24 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
25}
26
27static inline u32 arm11_read_pmnc(void)
28{
29 u32 val;
30 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
31 return val;
32}
33
34static void arm11_reset_counter(unsigned int cnt)
35{
36 u32 val = -(u32)counter_config[CPU_COUNTER(smp_processor_id(), cnt)].count;
37 switch (cnt) {
38 case CCNT:
39 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
40 break;
41
42 case PMN0:
43 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
44 break;
45
46 case PMN1:
47 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
48 break;
49 }
50}
51
52int arm11_setup_pmu(void)
53{
54 unsigned int cnt;
55 u32 pmnc;
56
57 if (arm11_read_pmnc() & PMCR_E) {
58 printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", smp_processor_id());
59 return -EBUSY;
60 }
61
62 /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
63 arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
64 PMCR_C | PMCR_P);
65
66 for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
67 unsigned long event;
68
69 if (!counter_config[CPU_COUNTER(smp_processor_id(), cnt)].enabled)
70 continue;
71
72 event = counter_config[CPU_COUNTER(smp_processor_id(), cnt)].event & 255;
73
74 /*
75 * Set event (if destined for PMNx counters)
76 */
77 if (cnt == PMN0) {
78 pmnc |= event << 20;
79 } else if (cnt == PMN1) {
80 pmnc |= event << 12;
81 }
82
83 /*
84 * We don't need to set the event if it's a cycle count
85 * Enable interrupt for this counter
86 */
87 pmnc |= PMCR_IEN_PMN0 << cnt;
88 arm11_reset_counter(cnt);
89 }
90 arm11_write_pmnc(pmnc);
91
92 return 0;
93}
94
95int arm11_start_pmu(void)
96{
97 arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
98 return 0;
99}
100
101int arm11_stop_pmu(void)
102{
103 unsigned int cnt;
104
105 arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
106
107 for (cnt = PMN0; cnt <= CCNT; cnt++)
108 arm11_reset_counter(cnt);
109
110 return 0;
111}
112
113/*
114 * CPU counters' IRQ handler (one IRQ per CPU)
115 */
116static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
117{
118 struct pt_regs *regs = get_irq_regs();
119 unsigned int cnt;
120 u32 pmnc;
121
122 pmnc = arm11_read_pmnc();
123
124 for (cnt = PMN0; cnt <= CCNT; cnt++) {
125 if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
126 arm11_reset_counter(cnt);
127 oprofile_add_sample(regs, CPU_COUNTER(smp_processor_id(), cnt));
128 }
129 }
130 /* Clear counter flag(s) */
131 arm11_write_pmnc(pmnc);
132 return IRQ_HANDLED;
133}
134
135int arm11_request_interrupts(const int *irqs, int nr)
136{
137 unsigned int i;
138 int ret = 0;
139
140 for(i = 0; i < nr; i++) {
141 ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
142 if (ret != 0) {
143 printk(KERN_ERR "oprofile: unable to request IRQ%u for MPCORE-EM\n",
144 irqs[i]);
145 break;
146 }
147 }
148
149 if (i != nr)
150 while (i-- != 0)
151 free_irq(irqs[i], NULL);
152
153 return ret;
154}
155
156void arm11_release_interrupts(const int *irqs, int nr)
157{
158 unsigned int i;
159
160 for (i = 0; i < nr; i++)
161 free_irq(irqs[i], NULL);
162}
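
arm11_reset_counter() above, like the ARMv7 and XScale models later in this patch, primes each hardware counter with -(u32)count, the two's complement of the sampling period, so that the 32-bit counter overflows (and raises its interrupt) after exactly count events. A stand-alone illustration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period = 100000;
	uint32_t counter = -(uint32_t)period;	/* 0xfffe7960 */
	uint32_t events = 0;

	while (counter != 0) {	/* pretend each increment is one PMU event */
		counter++;
		events++;
	}

	printf("counter wrapped after %u events\n", events);	/* 100000 */
	return 0;
}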
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h
deleted file mode 100644
index 1902b99d9dfd..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/**
2 * @file op_model_arm11_core.h
3 * ARM11 Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 Oprofile Authors
11 *
12 * @remark Read the file COPYING
13 *
14 * @author Zwane Mwaikambo
15 */
16#ifndef OP_MODEL_ARM11_CORE_H
17#define OP_MODEL_ARM11_CORE_H
18
19/*
20 * Per-CPU PMCR
21 */
22#define PMCR_E (1 << 0) /* Enable */
23#define PMCR_P (1 << 1) /* Count reset */
24#define PMCR_C (1 << 2) /* Cycle counter reset */
25#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
26#define PMCR_IEN_PMN0 (1 << 4) /* Interrupt enable count reg 0 */
27#define PMCR_IEN_PMN1 (1 << 5) /* Interrupt enable count reg 1 */
28#define PMCR_IEN_CCNT (1 << 6) /* Interrupt enable cycle counter */
29#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
30#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
31#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
32
33#define PMN0 0
34#define PMN1 1
35#define CCNT 2
36
37#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
38
39int arm11_setup_pmu(void);
40int arm11_start_pmu(void);
41int arm11_stop_pmu(void);
42int arm11_request_interrupts(const int *, int);
43void arm11_release_interrupts(const int *, int);
44
45#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
deleted file mode 100644
index f73ce875a395..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ /dev/null
@@ -1,306 +0,0 @@
1/**
2 * @file op_model_mpcore.c
3 * MPCORE Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 Oprofile Authors
11 *
12 * @remark Read the file COPYING
13 *
14 * @author Zwane Mwaikambo
15 *
16 * Counters:
17 * 0: PMN0 on CPU0, per-cpu configurable event counter
18 * 1: PMN1 on CPU0, per-cpu configurable event counter
19 * 2: CCNT on CPU0
20 * 3: PMN0 on CPU1
21 * 4: PMN1 on CPU1
22 * 5: CCNT on CPU1
 23 * 6: PMN0 on CPU2
 24 * 7: PMN1 on CPU2
 25 * 8: CCNT on CPU2
 26 * 9: PMN0 on CPU3
 27 * 10: PMN1 on CPU3
 28 * 11: CCNT on CPU3
29 * 12-19: configurable SCU event counters
30 */
31
32/* #define DEBUG */
33#include <linux/types.h>
34#include <linux/errno.h>
35#include <linux/err.h>
36#include <linux/sched.h>
37#include <linux/oprofile.h>
38#include <linux/interrupt.h>
39#include <linux/smp.h>
40#include <linux/io.h>
41
42#include <asm/irq.h>
43#include <asm/mach/irq.h>
44#include <mach/hardware.h>
45#include <mach/board-eb.h>
46#include <asm/system.h>
47#include <asm/pmu.h>
48
49#include "op_counter.h"
50#include "op_arm_model.h"
51#include "op_model_arm11_core.h"
52#include "op_model_mpcore.h"
53
54/*
55 * MPCore SCU event monitor support
56 */
57#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
58
59/*
60 * Bitmask of used SCU counters
61 */
62static unsigned int scu_em_used;
63static const struct pmu_irqs *pmu_irqs;
64
65/*
66 * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
67 */
68static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
69{
70 writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
71}
72
73static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
74{
75 event &= 0xff;
76 writeb(event, &emc->MCEB[n]);
77}
78
79/*
80 * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
81 */
82static irqreturn_t scu_em_interrupt(int irq, void *arg)
83{
84 struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
85 unsigned int cnt;
86
87 cnt = irq - IRQ_EB11MP_PMU_SCU0;
88 oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
89 scu_reset_counter(emc, cnt);
90
91 /* Clear overflow flag for this counter */
92 writel(1 << (cnt + 16), &emc->PMCR);
93
94 return IRQ_HANDLED;
95}
96
97/* Configure just the SCU counters that the user has requested */
98static void scu_setup(void)
99{
100 struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
101 unsigned int i;
102
103 scu_em_used = 0;
104
105 for (i = 0; i < NUM_SCU_COUNTERS; i++) {
106 if (counter_config[SCU_COUNTER(i)].enabled &&
107 counter_config[SCU_COUNTER(i)].event) {
108 scu_set_event(emc, i, 0); /* disable counter for now */
109 scu_em_used |= 1 << i;
110 }
111 }
112}
113
114static int scu_start(void)
115{
116 struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
117 unsigned int temp, i;
118 unsigned long event;
119 int ret = 0;
120
121 /*
122 * request the SCU counter interrupts that we need
123 */
124 for (i = 0; i < NUM_SCU_COUNTERS; i++) {
125 if (scu_em_used & (1 << i)) {
126 ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
127 if (ret) {
128 printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
129 IRQ_EB11MP_PMU_SCU0 + i);
130 goto err_free_scu;
131 }
132 }
133 }
134
135 /*
136 * clear overflow and enable interrupt for all used counters
137 */
138 temp = readl(&emc->PMCR);
139 for (i = 0; i < NUM_SCU_COUNTERS; i++) {
140 if (scu_em_used & (1 << i)) {
141 scu_reset_counter(emc, i);
142 event = counter_config[SCU_COUNTER(i)].event;
143 scu_set_event(emc, i, event);
144
145 /* clear overflow/interrupt */
146 temp |= 1 << (i + 16);
147 /* enable interrupt*/
148 temp |= 1 << (i + 8);
149 }
150 }
151
152 /* Enable all 8 counters */
153 temp |= PMCR_E;
154 writel(temp, &emc->PMCR);
155
156 return 0;
157
158 err_free_scu:
159 while (i--)
160 free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
161 return ret;
162}
163
164static void scu_stop(void)
165{
166 struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
167 unsigned int temp, i;
168
169 /* Disable counter interrupts */
170 /* Don't disable all 8 counters (with the E bit) as they may be in use */
171 temp = readl(&emc->PMCR);
172 for (i = 0; i < NUM_SCU_COUNTERS; i++) {
173 if (scu_em_used & (1 << i))
174 temp &= ~(1 << (i + 8));
175 }
176 writel(temp, &emc->PMCR);
177
178 /* Free counter interrupts and reset counters */
179 for (i = 0; i < NUM_SCU_COUNTERS; i++) {
180 if (scu_em_used & (1 << i)) {
181 scu_reset_counter(emc, i);
182 free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
183 }
184 }
185}
186
187struct em_function_data {
188 int (*fn)(void);
189 int ret;
190};
191
192static void em_func(void *data)
193{
194 struct em_function_data *d = data;
195 int ret = d->fn();
196 if (ret)
197 d->ret = ret;
198}
199
200static int em_call_function(int (*fn)(void))
201{
202 struct em_function_data data;
203
204 data.fn = fn;
205 data.ret = 0;
206
207 preempt_disable();
208 smp_call_function(em_func, &data, 1);
209 em_func(&data);
210 preempt_enable();
211
212 return data.ret;
213}
214
215/*
216 * Glue to stick the individual ARM11 PMUs and the SCU
217 * into the oprofile framework.
218 */
219static int em_setup_ctrs(void)
220{
221 int ret;
222
223 /* Configure CPU counters by cross-calling to the other CPUs */
224 ret = em_call_function(arm11_setup_pmu);
225 if (ret == 0)
226 scu_setup();
227
228 return 0;
229}
230
231static int em_start(void)
232{
233 int ret;
234
235 pmu_irqs = reserve_pmu();
236 if (IS_ERR(pmu_irqs)) {
237 ret = PTR_ERR(pmu_irqs);
238 goto out;
239 }
240
241 ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
242 if (ret == 0) {
243 em_call_function(arm11_start_pmu);
244
245 ret = scu_start();
246 if (ret) {
247 arm11_release_interrupts(pmu_irqs->irqs,
248 pmu_irqs->num_irqs);
249 } else {
250 release_pmu(pmu_irqs);
251 pmu_irqs = NULL;
252 }
253 }
254
255out:
256 return ret;
257}
258
259static void em_stop(void)
260{
261 em_call_function(arm11_stop_pmu);
262 arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
263 scu_stop();
264 release_pmu(pmu_irqs);
265}
266
267/*
268 * Why isn't there a function to route an IRQ to a specific CPU in
269 * genirq?
270 */
271static void em_route_irq(int irq, unsigned int cpu)
272{
273 struct irq_desc *desc = irq_desc + irq;
274 const struct cpumask *mask = cpumask_of(cpu);
275
276 spin_lock_irq(&desc->lock);
277 cpumask_copy(desc->affinity, mask);
278 desc->chip->set_affinity(irq, mask);
279 spin_unlock_irq(&desc->lock);
280}
281
282static int em_setup(void)
283{
284 /*
285 * Send SCU PMU interrupts to the "owner" CPU.
286 */
287 em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
288 em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
289 em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
290 em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
291 em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
292 em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
293 em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
294 em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
295
296 return init_pmu();
297}
298
299struct op_arm_model_spec op_mpcore_spec = {
300 .init = em_setup,
301 .num_counters = MPCORE_NUM_COUNTERS,
302 .setup_ctrs = em_setup_ctrs,
303 .start = em_start,
304 .stop = em_stop,
305 .name = "arm/mpcore",
306};
diff --git a/arch/arm/oprofile/op_model_mpcore.h b/arch/arm/oprofile/op_model_mpcore.h
deleted file mode 100644
index 73d811023688..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/**
 2 * @file op_model_mpcore.h
3 * MPCORE Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 Oprofile Authors
11 *
12 * @remark Read the file COPYING
13 *
14 * @author Zwane Mwaikambo
15 */
16#ifndef OP_MODEL_MPCORE_H
17#define OP_MODEL_MPCORE_H
18
19struct eventmonitor {
20 unsigned long PMCR;
21 unsigned char MCEB[8];
22 unsigned long MC[8];
23};
24
25/*
26 * List of userspace counter numbers: note that the structure is important.
27 * The code relies on CPUn's counters being CPU0's counters + 3n
28 * and on CPU0's counters starting at 0
29 */
30
31#define COUNTER_CPU0_PMN0 0
32#define COUNTER_CPU0_PMN1 1
33#define COUNTER_CPU0_CCNT 2
34
35#define COUNTER_CPU1_PMN0 3
36#define COUNTER_CPU1_PMN1 4
37#define COUNTER_CPU1_CCNT 5
38
39#define COUNTER_CPU2_PMN0 6
40#define COUNTER_CPU2_PMN1 7
41#define COUNTER_CPU2_CCNT 8
42
43#define COUNTER_CPU3_PMN0 9
44#define COUNTER_CPU3_PMN1 10
45#define COUNTER_CPU3_CCNT 11
46
47#define COUNTER_SCU_MN0 12
48#define COUNTER_SCU_MN1 13
49#define COUNTER_SCU_MN2 14
50#define COUNTER_SCU_MN3 15
51#define COUNTER_SCU_MN4 16
52#define COUNTER_SCU_MN5 17
53#define COUNTER_SCU_MN6 18
54#define COUNTER_SCU_MN7 19
55#define NUM_SCU_COUNTERS 8
56
57#define SCU_COUNTER(number) ((number) + COUNTER_SCU_MN0)
58
59#define MPCORE_NUM_COUNTERS SCU_COUNTER(NUM_SCU_COUNTERS)
60
61#endif
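
CPU_COUNTER() from op_model_arm11_core.h (cpu * 3 + counter) and SCU_COUNTER() above jointly define the flat, user-visible counter numbering this header lists: three counters per CPU for CPUs 0-3, then the eight SCU monitors at 12-19. A quick sketch that reproduces the mapping:

#include <stdio.h>

#define PMN0	0
#define PMN1	1
#define CCNT	2

#define CPU_COUNTER(cpu, counter)	((cpu) * 3 + (counter))
#define COUNTER_SCU_MN0			12
#define SCU_COUNTER(number)		((number) + COUNTER_SCU_MN0)
#define NUM_SCU_COUNTERS		8

int main(void)
{
	static const char *name[] = { "PMN0", "PMN1", "CCNT" };
	int cpu, cnt;

	for (cpu = 0; cpu < 4; cpu++)
		for (cnt = PMN0; cnt <= CCNT; cnt++)
			printf("%2d: %s on CPU%d\n",
			       CPU_COUNTER(cpu, cnt), name[cnt], cpu);

	for (cnt = 0; cnt < NUM_SCU_COUNTERS; cnt++)
		printf("%2d: SCU monitor %d\n", SCU_COUNTER(cnt), cnt);

	return 0;	/* counters 0-11 are per-CPU, 12-19 are SCU */
}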
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c
deleted file mode 100644
index a22357a2fd08..000000000000
--- a/arch/arm/oprofile/op_model_v6.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/**
2 * @file op_model_v6.c
3 * ARM11 Performance Monitor Driver
4 *
5 * Based on op_model_xscale.c
6 *
7 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
8 * @remark Copyright 2000-2004 MontaVista Software Inc
9 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
10 * @remark Copyright 2004 Intel Corporation
11 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
12 * @remark Copyright 2004 OProfile Authors
13 *
14 * @remark Read the file COPYING
15 *
16 * @author Tony Lindgren <tony@atomide.com>
17 */
18
19/* #define DEBUG */
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <linux/sched.h>
24#include <linux/oprofile.h>
25#include <linux/interrupt.h>
26#include <asm/irq.h>
27#include <asm/system.h>
28#include <asm/pmu.h>
29
30#include "op_counter.h"
31#include "op_arm_model.h"
32#include "op_model_arm11_core.h"
33
34static const struct pmu_irqs *pmu_irqs;
35
36static void armv6_pmu_stop(void)
37{
38 arm11_stop_pmu();
39 arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
40 release_pmu(pmu_irqs);
41 pmu_irqs = NULL;
42}
43
44static int armv6_pmu_start(void)
45{
46 int ret;
47
48 pmu_irqs = reserve_pmu();
49 if (IS_ERR(pmu_irqs)) {
50 ret = PTR_ERR(pmu_irqs);
51 goto out;
52 }
53
54 ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
55 if (ret >= 0) {
56 ret = arm11_start_pmu();
57 } else {
58 release_pmu(pmu_irqs);
59 pmu_irqs = NULL;
60 }
61
62out:
63 return ret;
64}
65
66static int armv6_detect_pmu(void)
67{
68 return 0;
69}
70
71struct op_arm_model_spec op_armv6_spec = {
72 .init = armv6_detect_pmu,
73 .num_counters = 3,
74 .setup_ctrs = arm11_setup_pmu,
75 .start = armv6_pmu_start,
76 .stop = armv6_pmu_stop,
77 .name = "arm/armv6",
78};
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
deleted file mode 100644
index 8642d0891ae1..000000000000
--- a/arch/arm/oprofile/op_model_v7.c
+++ /dev/null
@@ -1,415 +0,0 @@
1/**
2 * op_model_v7.c
3 * ARM V7 (Cortex A8) Event Monitor Driver
4 *
5 * Copyright 2008 Jean Pihet <jpihet@mvista.com>
6 * Copyright 2004 ARM SMP Development Team
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/oprofile.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/smp.h>
19
20#include <asm/pmu.h>
21
22#include "op_counter.h"
23#include "op_arm_model.h"
24#include "op_model_v7.h"
25
26/* #define DEBUG */
27
28
29/*
30 * ARM V7 PMNC support
31 */
32
33static u32 cnt_en[CNTMAX];
34
35static inline void armv7_pmnc_write(u32 val)
36{
37 val &= PMNC_MASK;
38 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
39}
40
41static inline u32 armv7_pmnc_read(void)
42{
43 u32 val;
44
45 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
46 return val;
47}
48
49static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
50{
51 u32 val;
52
53 if (cnt >= CNTMAX) {
54 printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
55 " %d\n", smp_processor_id(), cnt);
56 return -1;
57 }
58
59 if (cnt == CCNT)
60 val = CNTENS_C;
61 else
62 val = (1 << (cnt - CNT0));
63
64 val &= CNTENS_MASK;
65 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
66
67 return cnt;
68}
69
70static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
71{
72 u32 val;
73
74 if (cnt >= CNTMAX) {
75 printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
76 " %d\n", smp_processor_id(), cnt);
77 return -1;
78 }
79
80 if (cnt == CCNT)
81 val = CNTENC_C;
82 else
83 val = (1 << (cnt - CNT0));
84
85 val &= CNTENC_MASK;
86 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
87
88 return cnt;
89}
90
91static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
92{
93 u32 val;
94
95 if (cnt >= CNTMAX) {
96 printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
97 " interrupt enable %d\n", smp_processor_id(), cnt);
98 return -1;
99 }
100
101 if (cnt == CCNT)
102 val = INTENS_C;
103 else
104 val = (1 << (cnt - CNT0));
105
106 val &= INTENS_MASK;
107 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
108
109 return cnt;
110}
111
112static inline u32 armv7_pmnc_getreset_flags(void)
113{
114 u32 val;
115
116 /* Read */
117 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
118
119 /* Write to clear flags */
120 val &= FLAG_MASK;
121 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
122
123 return val;
124}
125
126static inline int armv7_pmnc_select_counter(unsigned int cnt)
127{
128 u32 val;
129
130 if ((cnt == CCNT) || (cnt >= CNTMAX)) {
 131 printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counter"
132 " %d\n", smp_processor_id(), cnt);
133 return -1;
134 }
135
136 val = (cnt - CNT0) & SELECT_MASK;
137 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
138
139 return cnt;
140}
141
142static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
143{
144 if (armv7_pmnc_select_counter(cnt) == cnt) {
145 val &= EVTSEL_MASK;
146 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
147 }
148}
149
150static void armv7_pmnc_reset_counter(unsigned int cnt)
151{
152 u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
153 u32 val = -(u32)counter_config[cpu_cnt].count;
154
155 switch (cnt) {
156 case CCNT:
157 armv7_pmnc_disable_counter(cnt);
158
159 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
160
161 if (cnt_en[cnt] != 0)
162 armv7_pmnc_enable_counter(cnt);
163
164 break;
165
166 case CNT0:
167 case CNT1:
168 case CNT2:
169 case CNT3:
170 armv7_pmnc_disable_counter(cnt);
171
172 if (armv7_pmnc_select_counter(cnt) == cnt)
173 asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
174
175 if (cnt_en[cnt] != 0)
176 armv7_pmnc_enable_counter(cnt);
177
178 break;
179
180 default:
181 printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
182 " %d\n", smp_processor_id(), cnt);
183 break;
184 }
185}
186
187int armv7_setup_pmnc(void)
188{
189 unsigned int cnt;
190
191 if (armv7_pmnc_read() & PMNC_E) {
192 printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
193 " new event counter.\n", smp_processor_id());
194 return -EBUSY;
195 }
196
197 /* Initialize & Reset PMNC: C bit and P bit */
198 armv7_pmnc_write(PMNC_P | PMNC_C);
199
200
201 for (cnt = CCNT; cnt < CNTMAX; cnt++) {
202 unsigned long event;
203 u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
204
205 /*
206 * Disable counter
207 */
208 armv7_pmnc_disable_counter(cnt);
209 cnt_en[cnt] = 0;
210
211 if (!counter_config[cpu_cnt].enabled)
212 continue;
213
214 event = counter_config[cpu_cnt].event & 255;
215
216 /*
217 * Set event (if destined for PMNx counters)
218 * We don't need to set the event if it's a cycle count
219 */
220 if (cnt != CCNT)
221 armv7_pmnc_write_evtsel(cnt, event);
222
223 /*
224 * Enable interrupt for this counter
225 */
226 armv7_pmnc_enable_intens(cnt);
227
228 /*
229 * Reset counter
230 */
231 armv7_pmnc_reset_counter(cnt);
232
233 /*
234 * Enable counter
235 */
236 armv7_pmnc_enable_counter(cnt);
237 cnt_en[cnt] = 1;
238 }
239
240 return 0;
241}
242
243static inline void armv7_start_pmnc(void)
244{
245 armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
246}
247
248static inline void armv7_stop_pmnc(void)
249{
250 armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
251}
252
253/*
254 * CPU counters' IRQ handler (one IRQ per CPU)
255 */
256static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
257{
258 struct pt_regs *regs = get_irq_regs();
259 unsigned int cnt;
260 u32 flags;
261
262
263 /*
264 * Stop IRQ generation
265 */
266 armv7_stop_pmnc();
267
268 /*
269 * Get and reset overflow status flags
270 */
271 flags = armv7_pmnc_getreset_flags();
272
273 /*
274 * Cycle counter
275 */
276 if (flags & FLAG_C) {
277 u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
278 armv7_pmnc_reset_counter(CCNT);
279 oprofile_add_sample(regs, cpu_cnt);
280 }
281
282 /*
283 * PMNC counters 0:3
284 */
285 for (cnt = CNT0; cnt < CNTMAX; cnt++) {
286 if (flags & (1 << (cnt - CNT0))) {
287 u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
288 armv7_pmnc_reset_counter(cnt);
289 oprofile_add_sample(regs, cpu_cnt);
290 }
291 }
292
293 /*
294 * Allow IRQ generation
295 */
296 armv7_start_pmnc();
297
298 return IRQ_HANDLED;
299}
300
301int armv7_request_interrupts(const int *irqs, int nr)
302{
303 unsigned int i;
304 int ret = 0;
305
306 for (i = 0; i < nr; i++) {
307 ret = request_irq(irqs[i], armv7_pmnc_interrupt,
308 IRQF_DISABLED, "CP15 PMNC", NULL);
309 if (ret != 0) {
310 printk(KERN_ERR "oprofile: unable to request IRQ%u"
311 " for ARMv7\n",
312 irqs[i]);
313 break;
314 }
315 }
316
317 if (i != nr)
318 while (i-- != 0)
319 free_irq(irqs[i], NULL);
320
321 return ret;
322}
323
324void armv7_release_interrupts(const int *irqs, int nr)
325{
326 unsigned int i;
327
328 for (i = 0; i < nr; i++)
329 free_irq(irqs[i], NULL);
330}
331
332#ifdef DEBUG
333static void armv7_pmnc_dump_regs(void)
334{
335 u32 val;
336 unsigned int cnt;
337
338 printk(KERN_INFO "PMNC registers dump:\n");
339
340 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
341 printk(KERN_INFO "PMNC =0x%08x\n", val);
342
343 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
344 printk(KERN_INFO "CNTENS=0x%08x\n", val);
345
346 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
347 printk(KERN_INFO "INTENS=0x%08x\n", val);
348
349 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
350 printk(KERN_INFO "FLAGS =0x%08x\n", val);
351
352 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
353 printk(KERN_INFO "SELECT=0x%08x\n", val);
354
355 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
356 printk(KERN_INFO "CCNT =0x%08x\n", val);
357
358 for (cnt = CNT0; cnt < CNTMAX; cnt++) {
359 armv7_pmnc_select_counter(cnt);
360 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
361 printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
362 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
363 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
364 }
365}
366#endif
367
368static const struct pmu_irqs *pmu_irqs;
369
370static void armv7_pmnc_stop(void)
371{
372#ifdef DEBUG
373 armv7_pmnc_dump_regs();
374#endif
375 armv7_stop_pmnc();
376 armv7_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
377 release_pmu(pmu_irqs);
378 pmu_irqs = NULL;
379}
380
381static int armv7_pmnc_start(void)
382{
383 int ret;
384
385 pmu_irqs = reserve_pmu();
386 if (IS_ERR(pmu_irqs))
387 return PTR_ERR(pmu_irqs);
388
389#ifdef DEBUG
390 armv7_pmnc_dump_regs();
391#endif
392 ret = armv7_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
393 if (ret >= 0) {
394 armv7_start_pmnc();
395 } else {
396 release_pmu(pmu_irqs);
397 pmu_irqs = NULL;
398 }
399
400 return ret;
401}
402
403static int armv7_detect_pmnc(void)
404{
405 return 0;
406}
407
408struct op_arm_model_spec op_armv7_spec = {
409 .init = armv7_detect_pmnc,
410 .num_counters = 5,
411 .setup_ctrs = armv7_setup_pmnc,
412 .start = armv7_pmnc_start,
413 .stop = armv7_pmnc_stop,
414 .name = "arm/armv7",
415};
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
deleted file mode 100644
index 9ca334b39c75..000000000000
--- a/arch/arm/oprofile/op_model_v7.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/**
2 * op_model_v7.h
3 * ARM v7 (Cortex A8) Event Monitor Driver
4 *
5 * Copyright 2008 Jean Pihet <jpihet@mvista.com>
6 * Copyright 2004 ARM SMP Development Team
7 * Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
8 * Copyright 2000-2004 MontaVista Software Inc
9 * Copyright 2004 Dave Jiang <dave.jiang@intel.com>
10 * Copyright 2004 Intel Corporation
11 * Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
12 * Copyright 2004 Oprofile Authors
13 *
14 * Read the file COPYING
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20#ifndef OP_MODEL_V7_H
21#define OP_MODEL_V7_H
22
23/*
24 * Per-CPU PMNC: config reg
25 */
26#define PMNC_E (1 << 0) /* Enable all counters */
27#define PMNC_P (1 << 1) /* Reset all counters */
28#define PMNC_C (1 << 2) /* Cycle counter reset */
29#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
30#define PMNC_X (1 << 4) /* Export to ETM */
31#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
32#define PMNC_MASK 0x3f /* Mask for writable bits */
33
34/*
35 * Available counters
36 */
37#define CCNT 0
38#define CNT0 1
39#define CNT1 2
40#define CNT2 3
41#define CNT3 4
42#define CNTMAX 5
43
44#define CPU_COUNTER(cpu, counter) ((cpu) * CNTMAX + (counter))
45
46/*
47 * CNTENS: counters enable reg
48 */
49#define CNTENS_P0 (1 << 0)
50#define CNTENS_P1 (1 << 1)
51#define CNTENS_P2 (1 << 2)
52#define CNTENS_P3 (1 << 3)
53#define CNTENS_C (1 << 31)
54#define CNTENS_MASK 0x8000000f /* Mask for writable bits */
55
56/*
57 * CNTENC: counters disable reg
58 */
59#define CNTENC_P0 (1 << 0)
60#define CNTENC_P1 (1 << 1)
61#define CNTENC_P2 (1 << 2)
62#define CNTENC_P3 (1 << 3)
63#define CNTENC_C (1 << 31)
64#define CNTENC_MASK 0x8000000f /* Mask for writable bits */
65
66/*
67 * INTENS: counters overflow interrupt enable reg
68 */
69#define INTENS_P0 (1 << 0)
70#define INTENS_P1 (1 << 1)
71#define INTENS_P2 (1 << 2)
72#define INTENS_P3 (1 << 3)
73#define INTENS_C (1 << 31)
74#define INTENS_MASK 0x8000000f /* Mask for writable bits */
75
76/*
77 * EVTSEL: Event selection reg
78 */
79#define EVTSEL_MASK 0x7f /* Mask for writable bits */
80
81/*
82 * SELECT: Counter selection reg
83 */
84#define SELECT_MASK 0x1f /* Mask for writable bits */
85
86/*
87 * FLAG: counters overflow flag status reg
88 */
89#define FLAG_P0 (1 << 0)
90#define FLAG_P1 (1 << 1)
91#define FLAG_P2 (1 << 2)
92#define FLAG_P3 (1 << 3)
93#define FLAG_C (1 << 31)
94#define FLAG_MASK 0x8000000f /* Mask for writable bits */
95
96
97int armv7_setup_pmu(void);
98int armv7_start_pmu(void);
99int armv7_stop_pmu(void);
100int armv7_request_interrupts(const int *, int);
101void armv7_release_interrupts(const int *, int);
102
103#endif
diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c
deleted file mode 100644
index 1d34a02048bd..000000000000
--- a/arch/arm/oprofile/op_model_xscale.c
+++ /dev/null
@@ -1,444 +0,0 @@
1/**
2 * @file op_model_xscale.c
3 * XScale Performance Monitor Driver
4 *
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 OProfile Authors
11 *
12 * @remark Read the file COPYING
13 *
14 * @author Zwane Mwaikambo
15 */
16
17/* #define DEBUG */
18#include <linux/types.h>
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/sched.h>
22#include <linux/oprofile.h>
23#include <linux/interrupt.h>
24#include <linux/irq.h>
25
26#include <asm/cputype.h>
27#include <asm/pmu.h>
28
29#include "op_counter.h"
30#include "op_arm_model.h"
31
32#define PMU_ENABLE 0x001 /* Enable counters */
33#define PMN_RESET 0x002 /* Reset event counters */
34#define CCNT_RESET 0x004 /* Reset clock counter */
35#define PMU_RESET (CCNT_RESET | PMN_RESET)
36#define PMU_CNT64 0x008 /* Make CCNT count every 64th cycle */
37
38/*
39 * Different types of events that can be counted by the XScale PMU
40 * as used by Oprofile userspace. Here primarily for documentation
41 * purposes.
42 */
43
44#define EVT_ICACHE_MISS 0x00
45#define EVT_ICACHE_NO_DELIVER 0x01
46#define EVT_DATA_STALL 0x02
47#define EVT_ITLB_MISS 0x03
48#define EVT_DTLB_MISS 0x04
49#define EVT_BRANCH 0x05
50#define EVT_BRANCH_MISS 0x06
51#define EVT_INSTRUCTION 0x07
52#define EVT_DCACHE_FULL_STALL 0x08
53#define EVT_DCACHE_FULL_STALL_CONTIG 0x09
54#define EVT_DCACHE_ACCESS 0x0A
55#define EVT_DCACHE_MISS 0x0B
56#define EVT_DCACE_WRITE_BACK 0x0C
57#define EVT_PC_CHANGED 0x0D
58#define EVT_BCU_REQUEST 0x10
59#define EVT_BCU_FULL 0x11
60#define EVT_BCU_DRAIN 0x12
61#define EVT_BCU_ECC_NO_ELOG 0x14
62#define EVT_BCU_1_BIT_ERR 0x15
63#define EVT_RMW 0x16
64/* EVT_CCNT is not hardware defined */
65#define EVT_CCNT 0xFE
66#define EVT_UNUSED 0xFF
67
68struct pmu_counter {
69 volatile unsigned long ovf;
70 unsigned long reset_counter;
71};
72
73enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };
74
75static struct pmu_counter results[MAX_COUNTERS];
76
77/*
78 * There are two versions of the PMU in current XScale processors
79 * with differing register layouts and number of performance counters.
80 * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
81 * We detect which register layout to use in xscale_detect_pmu()
82 */
83enum { PMU_XSC1, PMU_XSC2 };
84
85struct pmu_type {
86 int id;
87 char *name;
88 int num_counters;
89 unsigned int int_enable;
90 unsigned int cnt_ovf[MAX_COUNTERS];
91 unsigned int int_mask[MAX_COUNTERS];
92};
93
94static struct pmu_type pmu_parms[] = {
95 {
96 .id = PMU_XSC1,
97 .name = "arm/xscale1",
98 .num_counters = 3,
99 .int_mask = { [PMN0] = 0x10, [PMN1] = 0x20,
100 [CCNT] = 0x40 },
101 .cnt_ovf = { [CCNT] = 0x400, [PMN0] = 0x100,
102 [PMN1] = 0x200},
103 },
104 {
105 .id = PMU_XSC2,
106 .name = "arm/xscale2",
107 .num_counters = 5,
108 .int_mask = { [CCNT] = 0x01, [PMN0] = 0x02,
109 [PMN1] = 0x04, [PMN2] = 0x08,
110 [PMN3] = 0x10 },
111 .cnt_ovf = { [CCNT] = 0x01, [PMN0] = 0x02,
112 [PMN1] = 0x04, [PMN2] = 0x08,
113 [PMN3] = 0x10 },
114 },
115};
116
117static struct pmu_type *pmu;
118
119static void write_pmnc(u32 val)
120{
121 if (pmu->id == PMU_XSC1) {
122 /* upper 4bits and 7, 11 are write-as-0 */
123 val &= 0xffff77f;
124 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
125 } else {
126 /* bits 4-23 are write-as-0, 24-31 are write ignored */
127 val &= 0xf;
128 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
129 }
130}
131
132static u32 read_pmnc(void)
133{
134 u32 val;
135
136 if (pmu->id == PMU_XSC1)
137 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
138 else {
139 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
140 /* bits 1-2 and 4-23 are read-unpredictable */
141 val &= 0xff000009;
142 }
143
144 return val;
145}
146
147static u32 __xsc1_read_counter(int counter)
148{
149 u32 val = 0;
150
151 switch (counter) {
152 case CCNT:
153 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
154 break;
155 case PMN0:
156 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
157 break;
158 case PMN1:
159 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
160 break;
161 }
162 return val;
163}
164
165static u32 __xsc2_read_counter(int counter)
166{
167 u32 val = 0;
168
169 switch (counter) {
170 case CCNT:
171 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
172 break;
173 case PMN0:
174 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
175 break;
176 case PMN1:
177 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
178 break;
179 case PMN2:
180 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
181 break;
182 case PMN3:
183 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
184 break;
185 }
186 return val;
187}
188
189static u32 read_counter(int counter)
190{
191 u32 val;
192
193 if (pmu->id == PMU_XSC1)
194 val = __xsc1_read_counter(counter);
195 else
196 val = __xsc2_read_counter(counter);
197
198 return val;
199}
200
201static void __xsc1_write_counter(int counter, u32 val)
202{
203 switch (counter) {
204 case CCNT:
205 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
206 break;
207 case PMN0:
208 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
209 break;
210 case PMN1:
211 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
212 break;
213 }
214}
215
216static void __xsc2_write_counter(int counter, u32 val)
217{
218 switch (counter) {
219 case CCNT:
220 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
221 break;
222 case PMN0:
223 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
224 break;
225 case PMN1:
226 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
227 break;
228 case PMN2:
229 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
230 break;
231 case PMN3:
232 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
233 break;
234 }
235}
236
237static void write_counter(int counter, u32 val)
238{
239 if (pmu->id == PMU_XSC1)
240 __xsc1_write_counter(counter, val);
241 else
242 __xsc2_write_counter(counter, val);
243}
244
245static int xscale_setup_ctrs(void)
246{
247 u32 evtsel, pmnc;
248 int i;
249
250 for (i = CCNT; i < MAX_COUNTERS; i++) {
251 if (counter_config[i].enabled)
252 continue;
253
254 counter_config[i].event = EVT_UNUSED;
255 }
256
257 switch (pmu->id) {
258 case PMU_XSC1:
259 pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
260 pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
261 write_pmnc(pmnc);
262 break;
263
264 case PMU_XSC2:
265 evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
266 (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
267
268 pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
269 __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
270 break;
271 }
272
273 for (i = CCNT; i < MAX_COUNTERS; i++) {
274 if (counter_config[i].event == EVT_UNUSED) {
275 counter_config[i].event = 0;
276 pmu->int_enable &= ~pmu->int_mask[i];
277 continue;
278 }
279
280 results[i].reset_counter = counter_config[i].count;
281 write_counter(i, -(u32)counter_config[i].count);
282 pmu->int_enable |= pmu->int_mask[i];
283 pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
284 read_counter(i), counter_config[i].count);
285 }
286
287 return 0;
288}
289
290static void inline __xsc1_check_ctrs(void)
291{
292 int i;
293 u32 pmnc = read_pmnc();
294
295 /* NOTE: there's an A stepping errata that states if an overflow */
296 /* bit already exists and another occurs, the previous */
297 /* Overflow bit gets cleared. There's no workaround. */
298 /* Fixed in B stepping or later */
299
300 /* Write the value back to clear the overflow flags. Overflow */
301 /* flags remain in pmnc for use below */
302 write_pmnc(pmnc & ~PMU_ENABLE);
303
304 for (i = CCNT; i <= PMN1; i++) {
305 if (!(pmu->int_mask[i] & pmu->int_enable))
306 continue;
307
308 if (pmnc & pmu->cnt_ovf[i])
309 results[i].ovf++;
310 }
311}
312
313static void inline __xsc2_check_ctrs(void)
314{
315 int i;
316 u32 flag = 0, pmnc = read_pmnc();
317
318 pmnc &= ~PMU_ENABLE;
319 write_pmnc(pmnc);
320
321 /* read overflow flag register */
322 __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
323
324 for (i = CCNT; i <= PMN3; i++) {
325 if (!(pmu->int_mask[i] & pmu->int_enable))
326 continue;
327
328 if (flag & pmu->cnt_ovf[i])
329 results[i].ovf++;
330 }
331
332 /* writeback clears overflow bits */
333 __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
334}
335
336static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
337{
338 int i;
339 u32 pmnc;
340
341 if (pmu->id == PMU_XSC1)
342 __xsc1_check_ctrs();
343 else
344 __xsc2_check_ctrs();
345
346 for (i = CCNT; i < MAX_COUNTERS; i++) {
347 if (!results[i].ovf)
348 continue;
349
350 write_counter(i, -(u32)results[i].reset_counter);
351 oprofile_add_sample(get_irq_regs(), i);
352 results[i].ovf--;
353 }
354
355 pmnc = read_pmnc() | PMU_ENABLE;
356 write_pmnc(pmnc);
357
358 return IRQ_HANDLED;
359}
360
361static const struct pmu_irqs *pmu_irqs;
362
363static void xscale_pmu_stop(void)
364{
365 u32 pmnc = read_pmnc();
366
367 pmnc &= ~PMU_ENABLE;
368 write_pmnc(pmnc);
369
370 free_irq(pmu_irqs->irqs[0], results);
371 release_pmu(pmu_irqs);
372 pmu_irqs = NULL;
373}
374
375static int xscale_pmu_start(void)
376{
377 int ret;
378 u32 pmnc;
379
380 pmu_irqs = reserve_pmu();
381 if (IS_ERR(pmu_irqs))
382 return PTR_ERR(pmu_irqs);
383
384 pmnc = read_pmnc();
385
386 ret = request_irq(pmu_irqs->irqs[0], xscale_pmu_interrupt,
387 IRQF_DISABLED, "XScale PMU", (void *)results);
388
389 if (ret < 0) {
390 printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
391 pmu_irqs->irqs[0]);
392 release_pmu(pmu_irqs);
393 pmu_irqs = NULL;
394 return ret;
395 }
396
397 if (pmu->id == PMU_XSC1)
398 pmnc |= pmu->int_enable;
399 else {
400 __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
401 pmnc &= ~PMU_CNT64;
402 }
403
404 pmnc |= PMU_ENABLE;
405 write_pmnc(pmnc);
406 pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
407 return 0;
408}
409
410static int xscale_detect_pmu(void)
411{
412 int ret = 0;
413 u32 id;
414
415 id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
416
417 switch (id) {
418 case 1:
419 pmu = &pmu_parms[PMU_XSC1];
420 break;
421 case 2:
422 pmu = &pmu_parms[PMU_XSC2];
423 break;
424 default:
425 ret = -ENODEV;
426 break;
427 }
428
429 if (!ret) {
430 op_xscale_spec.name = pmu->name;
431 op_xscale_spec.num_counters = pmu->num_counters;
432 pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
433 }
434
435 return ret;
436}
437
438struct op_arm_model_spec op_xscale_spec = {
439 .init = xscale_detect_pmu,
440 .setup_ctrs = xscale_setup_ctrs,
441 .start = xscale_pmu_start,
442 .stop = xscale_pmu_stop,
443};
444
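
xscale_detect_pmu() above selects the register layout from bits [15:13] of the main ID register, the XScale core-generation field: a value of 1 selects the xsc1 layout (3 counters, e.g. IOP32x), 2 selects xsc2 (5 counters, e.g. IOP33x). A tiny decode sketch; the ID value below is fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t main_id = 0x69054190;		/* fabricated XScale-style ID */
	unsigned int gen = (main_id >> 13) & 0x7;

	switch (gen) {
	case 1:
		printf("xsc1 PMU layout (3 counters)\n");
		break;
	case 2:
		printf("xsc2 PMU layout (5 counters)\n");
		break;
	default:
		printf("unknown core generation %u\n", gen);
		break;
	}
	return 0;
}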