author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/alpha/oprofile
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/alpha/oprofile')
-rw-r--r--  arch/alpha/oprofile/Kconfig         |  23
-rw-r--r--  arch/alpha/oprofile/Makefile        |  19
-rw-r--r--  arch/alpha/oprofile/common.c        | 189
-rw-r--r--  arch/alpha/oprofile/op_impl.h       |  55
-rw-r--r--  arch/alpha/oprofile/op_model_ev4.c  | 116
-rw-r--r--  arch/alpha/oprofile/op_model_ev5.c  | 211
-rw-r--r--  arch/alpha/oprofile/op_model_ev6.c  | 103
-rw-r--r--  arch/alpha/oprofile/op_model_ev67.c | 263
8 files changed, 979 insertions(+), 0 deletions(-)
diff --git a/arch/alpha/oprofile/Kconfig b/arch/alpha/oprofile/Kconfig
new file mode 100644
index 000000000000..5ade19801b97
--- /dev/null
+++ b/arch/alpha/oprofile/Kconfig
@@ -0,0 +1,23 @@

menu "Profiling support"
        depends on EXPERIMENTAL

config PROFILING
        bool "Profiling support (EXPERIMENTAL)"
        help
          Say Y here to enable the extended profiling support mechanisms used
          by profilers such as OProfile.


config OPROFILE
        tristate "OProfile system profiling (EXPERIMENTAL)"
        depends on PROFILING
        help
          OProfile is a profiling system capable of profiling the
          whole system, including the kernel, kernel modules, libraries,
          and applications.

          If unsure, say N.

endmenu

diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
new file mode 100644
index 000000000000..4aa56247bdc6
--- /dev/null
+++ b/arch/alpha/oprofile/Makefile
@@ -0,0 +1,19 @@
EXTRA_CFLAGS := -Werror -Wno-sign-compare

obj-$(CONFIG_OPROFILE) += oprofile.o

DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
                oprof.o cpu_buffer.o buffer_sync.o \
                event_buffer.o oprofile_files.o \
                oprofilefs.o oprofile_stats.o \
                timer_int.o )

oprofile-y                              := $(DRIVER_OBJS) common.o
oprofile-$(CONFIG_ALPHA_GENERIC)        += op_model_ev4.o \
                                           op_model_ev5.o \
                                           op_model_ev6.o \
                                           op_model_ev67.o
oprofile-$(CONFIG_ALPHA_EV4)            += op_model_ev4.o
oprofile-$(CONFIG_ALPHA_EV5)            += op_model_ev5.o
oprofile-$(CONFIG_ALPHA_EV6)            += op_model_ev6.o \
                                           op_model_ev67.o
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
new file mode 100644
index 000000000000..908eb4af8dec
--- /dev/null
+++ b/arch/alpha/oprofile/common.c
@@ -0,0 +1,189 @@
/**
 * @file arch/alpha/oprofile/common.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"

extern struct op_axp_model op_model_ev4 __attribute__((weak));
extern struct op_axp_model op_model_ev5 __attribute__((weak));
extern struct op_axp_model op_model_pca56 __attribute__((weak));
extern struct op_axp_model op_model_ev6 __attribute__((weak));
extern struct op_axp_model op_model_ev67 __attribute__((weak));

static struct op_axp_model *model;

extern void (*perf_irq)(unsigned long, struct pt_regs *);
static void (*save_perf_irq)(unsigned long, struct pt_regs *);

static struct op_counter_config ctr[20];
static struct op_system_config sys;
static struct op_register_config reg;

/* Called from do_entInt to handle the performance monitor interrupt. */

static void
op_handle_interrupt(unsigned long which, struct pt_regs *regs)
{
        model->handle_interrupt(which, regs, ctr);

        /* If the user has selected an interrupt frequency that is
           not exactly the width of the counter, write a new value
           into the counter such that it'll overflow after N more
           events. */
        if ((reg.need_reset >> which) & 1)
                model->reset_ctr(&reg, which);
}

static int
op_axp_setup(void)
{
        unsigned long i, e;

        /* Install our interrupt handler into the existing hook. */
        save_perf_irq = perf_irq;
        perf_irq = op_handle_interrupt;

        /* Compute the mask of enabled counters. */
        for (i = e = 0; i < model->num_counters; ++i)
                if (ctr[i].enabled)
                        e |= 1 << i;
        reg.enable = e;

        /* Pre-compute the values to stuff in the hardware registers. */
        model->reg_setup(&reg, ctr, &sys);

        /* Configure the registers on all cpus. */
        smp_call_function(model->cpu_setup, &reg, 0, 1);
        model->cpu_setup(&reg);
        return 0;
}

static void
op_axp_shutdown(void)
{
        /* Remove our interrupt handler. We may be removing this module. */
        perf_irq = save_perf_irq;
}

static void
op_axp_cpu_start(void *dummy)
{
        wrperfmon(1, reg.enable);
}

static int
op_axp_start(void)
{
        smp_call_function(op_axp_cpu_start, NULL, 0, 1);
        op_axp_cpu_start(NULL);
        return 0;
}

static inline void
op_axp_cpu_stop(void *dummy)
{
        /* Disable performance monitoring for all counters. */
        wrperfmon(0, -1);
}

static void
op_axp_stop(void)
{
        smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
        op_axp_cpu_stop(NULL);
}

static int
op_axp_create_files(struct super_block * sb, struct dentry * root)
{
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                struct dentry *dir;
                char buf[3];

                snprintf(buf, sizeof buf, "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);

                oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
                /* Dummies. */
                oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
        }

        if (model->can_set_proc_mode) {
                oprofilefs_create_ulong(sb, root, "enable_pal",
                                        &sys.enable_pal);
                oprofilefs_create_ulong(sb, root, "enable_kernel",
                                        &sys.enable_kernel);
                oprofilefs_create_ulong(sb, root, "enable_user",
                                        &sys.enable_user);
        }

        return 0;
}

int __init
oprofile_arch_init(struct oprofile_operations *ops)
{
        struct op_axp_model *lmodel = NULL;

        switch (implver()) {
        case IMPLVER_EV4:
                lmodel = &op_model_ev4;
                break;
        case IMPLVER_EV5:
                /* 21164PC has a slightly different set of events.
                   Recognize the chip by the presence of the MAX insns. */
                if (!amask(AMASK_MAX))
                        lmodel = &op_model_pca56;
                else
                        lmodel = &op_model_ev5;
                break;
        case IMPLVER_EV6:
                /* 21264A supports ProfileMe.
                   Recognize the chip by the presence of the CIX insns. */
                if (!amask(AMASK_CIX))
                        lmodel = &op_model_ev67;
                else
                        lmodel = &op_model_ev6;
                break;
        }

        if (!lmodel)
                return -ENODEV;
        model = lmodel;

        ops->create_files = op_axp_create_files;
        ops->setup = op_axp_setup;
        ops->shutdown = op_axp_shutdown;
        ops->start = op_axp_start;
        ops->stop = op_axp_stop;
        ops->cpu_type = lmodel->cpu_type;

        printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
               lmodel->cpu_type);

        return 0;
}


void
oprofile_arch_exit(void)
{
}
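The glue above is deliberately generic: oprofile_arch_init() probes the CPU family with implver() and amask(), picks an op_axp_model, and fills in the oprofile_operations callbacks, while everything chip-specific hides behind the hooks declared in op_impl.h. As a reading aid, a minimal sketch of what a hypothetical extra model would have to supply follows; the fake_* names are illustrative only and not part of this patch.

/* Sketch only: a hypothetical op_axp_model, showing which hooks the
   common code in this file actually calls.  Assumes "op_impl.h" and
   <linux/oprofile.h> as included above; not part of this patch. */

static void fake_reg_setup(struct op_register_config *reg,
                           struct op_counter_config *ctr,
                           struct op_system_config *sys)
{
        /* Translate oprofilefs settings into cached register values;
           called once from op_axp_setup(). */
}

static void fake_cpu_setup(void *x)
{
        /* Runs on every CPU via smp_call_function(); programs the
           hardware from the cached op_register_config. */
}

static void fake_handle_interrupt(unsigned long which, struct pt_regs *regs,
                                  struct op_counter_config *ctr)
{
        oprofile_add_sample(regs, which);       /* record one sample */
}

struct op_axp_model op_model_fake = {
        .reg_setup = fake_reg_setup,
        .cpu_setup = fake_cpu_setup,
        .reset_ctr = NULL,              /* only needed when need_reset is used */
        .handle_interrupt = fake_handle_interrupt,
        .cpu_type = "alpha/fake",
        .num_counters = 2,
        .can_set_proc_mode = 0,
};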
diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h
new file mode 100644
index 000000000000..6b97893c1a80
--- /dev/null
+++ b/arch/alpha/oprofile/op_impl.h
@@ -0,0 +1,55 @@
/**
 * @file arch/alpha/oprofile/op_impl.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#ifndef OP_IMPL_H
#define OP_IMPL_H 1

/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
        unsigned long enabled;
        unsigned long event;
        unsigned long count;
        /* Dummies because I am too lazy to hack the userspace tools. */
        unsigned long kernel;
        unsigned long user;
        unsigned long unit_mask;
};

/* System-wide configuration as set via oprofilefs. */
struct op_system_config {
        unsigned long enable_pal;
        unsigned long enable_kernel;
        unsigned long enable_user;
};

/* Cached values for the various performance monitoring registers. */
struct op_register_config {
        unsigned long enable;
        unsigned long mux_select;
        unsigned long proc_mode;
        unsigned long freq;
        unsigned long reset_values;
        unsigned long need_reset;
};

/* Per-architecture configury and hooks. */
struct op_axp_model {
        void (*reg_setup) (struct op_register_config *,
                           struct op_counter_config *,
                           struct op_system_config *);
        void (*cpu_setup) (void *);
        void (*reset_ctr) (struct op_register_config *, unsigned long);
        void (*handle_interrupt) (unsigned long, struct pt_regs *,
                                  struct op_counter_config *);
        char *cpu_type;
        unsigned char num_counters;
        unsigned char can_set_proc_mode;
};

#endif
diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c
new file mode 100644
index 000000000000..80d764dbf22f
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev4.c
@@ -0,0 +1,116 @@
/**
 * @file arch/alpha/oprofile/op_model_ev4.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling. */

static void
ev4_reg_setup(struct op_register_config *reg,
              struct op_counter_config *ctr,
              struct op_system_config *sys)
{
        unsigned long ctl = 0, count, hilo;

        /* Select desired events. We've mapped the event numbers
           such that they fit directly into the event selection fields.

           Note that there is no "off" setting. In both cases we select
           the EXTERNAL event source, hoping that it'll be the lowest
           frequency, and set the frequency counter to LOW. The interrupts
           for these "disabled" counter overflows are ignored by the
           interrupt handler.

           This is most irritating, because the hardware *can* enable and
           disable the interrupts for these counters independently, but the
           wrperfmon interface doesn't allow it. */

        ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
        ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);

        /* EV4 can not read or write its counter registers. The only
           thing one can do at all is see if you overflow and get an
           interrupt. We can set the width of the counters, to some
           extent. Take the interrupt count selected by the user,
           map it onto one of the possible values, and write it back. */

        count = ctr[0].count;
        if (count <= 4096)
                count = 4096, hilo = 1;
        else
                count = 65536, hilo = 0;
        ctr[0].count = count;
        ctl |= (ctr[0].enabled && hilo) << 3;

        count = ctr[1].count;
        if (count <= 256)
                count = 256, hilo = 1;
        else
                count = 4096, hilo = 0;
        ctr[1].count = count;
        ctl |= (ctr[1].enabled && hilo);

        reg->mux_select = ctl;

        /* Select performance monitoring options. */
        /* ??? Need to come up with some mechanism to trace only
           selected processes. EV4 does not have a mechanism to
           select kernel or user mode only. For now, enable always. */
        reg->proc_mode = 0;

        /* Frequency is folded into mux_select for EV4. */
        reg->freq = 0;

        /* See above regarding no writes. */
        reg->reset_values = 0;
        reg->need_reset = 0;

}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev4_cpu_setup(void *x)
{
        struct op_register_config *reg = x;

        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
}

static void
ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
                     struct op_counter_config *ctr)
{
        /* EV4 can't properly disable counters individually.
           Discard "disabled" events now. */
        if (!ctr[which].enabled)
                return;

        /* Record the sample. */
        oprofile_add_sample(regs, which);
}


struct op_axp_model op_model_ev4 = {
        .reg_setup = ev4_reg_setup,
        .cpu_setup = ev4_cpu_setup,
        .reset_ctr = NULL,
        .handle_interrupt = ev4_handle_interrupt,
        .cpu_type = "alpha/ev4",
        .num_counters = 2,
        .can_set_proc_mode = 0,
};
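Since EV4 cannot write its counters, ev4_reg_setup() above only rounds the requested interrupt count up to one of two hardware widths (4096 or 65536 events for counter 0, 256 or 4096 for counter 1) and records the choice in a frequency ("hilo") bit. A small standalone sketch of that rounding, with an illustrative helper name, makes the arithmetic explicit:

/* Sketch: how the EV4 code above snaps a requested count onto the two
   selectable counter widths.  The helper name is illustrative only. */
static unsigned long
ev4_round_count(unsigned long requested, unsigned long low_width,
                unsigned long high_width, unsigned long *hilo)
{
        if (requested <= low_width) {
                *hilo = 1;              /* short counter: overflow sooner */
                return low_width;
        }
        *hilo = 0;                      /* full-width counter */
        return high_width;
}

/* Counter 0 uses 4096/65536, counter 1 uses 256/4096, matching ev4_reg_setup(). */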
diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c
new file mode 100644
index 000000000000..ceea6e1ad79a
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev5.c
@@ -0,0 +1,211 @@
/**
 * @file arch/alpha/oprofile/op_model_ev5.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling.

   The 21164 (EV5) and 21164PC (PCA56) vary in the bit placement and
   meaning of the "CBOX" events. Given that we don't care about meaning
   at this point, arrange for the difference in bit placement to be
   handled by common code. */

static void
common_reg_setup(struct op_register_config *reg,
                 struct op_counter_config *ctr,
                 struct op_system_config *sys,
                 int cbox1_ofs, int cbox2_ofs)
{
        int i, ctl, reset, need_reset;

        /* Select desired events. The event numbers are selected such
           that they map directly into the event selection fields:

                PCSEL0: 0, 1
                PCSEL1: 24-39
                CBOX1:  40-47
                PCSEL2: 48-63
                CBOX2:  64-71

           There are two special cases, in that CYCLES can be measured
           on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12].
           These event numbers are canonicalized to their first appearance. */

        ctl = 0;
        for (i = 0; i < 3; ++i) {
                unsigned long event = ctr[i].event;
                if (!ctr[i].enabled)
                        continue;

                /* Remap the duplicate events, as described above. */
                if (i == 2) {
                        if (event == 0)
                                event = 12+48;
                        else if (event == 2+41)
                                event = 4+65;
                }

                /* Convert the event numbers onto mux_select bit mask. */
                if (event < 2)
                        ctl |= event << 31;
                else if (event < 24)
                        /* error */;
                else if (event < 40)
                        ctl |= (event - 24) << 4;
                else if (event < 48)
                        ctl |= (event - 40) << cbox1_ofs | 15 << 4;
                else if (event < 64)
                        ctl |= event - 48;
                else if (event < 72)
                        ctl |= (event - 64) << cbox2_ofs | 15;
        }
        reg->mux_select = ctl;

        /* Select processor mode. */
        /* ??? Need to come up with some mechanism to trace only selected
           processes. For now select from pal, kernel and user mode. */
        ctl = 0;
        ctl |= !sys->enable_pal << 9;
        ctl |= !sys->enable_kernel << 8;
        ctl |= !sys->enable_user << 30;
        reg->proc_mode = ctl;

        /* Select interrupt frequencies. Take the interrupt count selected
           by the user, and map it onto one of the possible counter widths.
           If the user value is in between, compute a value to which the
           counter is reset at each interrupt. */

        ctl = reset = need_reset = 0;
        for (i = 0; i < 3; ++i) {
                unsigned long max, hilo, count = ctr[i].count;
                if (!ctr[i].enabled)
                        continue;

                if (count <= 256)
                        count = 256, hilo = 3, max = 256;
                else {
                        max = (i == 2 ? 16384 : 65536);
                        hilo = 2;
                        if (count > max)
                                count = max;
                }
                ctr[i].count = count;

                ctl |= hilo << (8 - i*2);
                reset |= (max - count) << (48 - 16*i);
                if (count != max)
                        need_reset |= 1 << i;
        }
        reg->freq = ctl;
        reg->reset_values = reset;
        reg->need_reset = need_reset;
}

static void
ev5_reg_setup(struct op_register_config *reg,
              struct op_counter_config *ctr,
              struct op_system_config *sys)
{
        common_reg_setup(reg, ctr, sys, 19, 22);
}

static void
pca56_reg_setup(struct op_register_config *reg,
                struct op_counter_config *ctr,
                struct op_system_config *sys)
{
        common_reg_setup(reg, ctr, sys, 8, 11);
}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev5_cpu_setup (void *x)
{
        struct op_register_config *reg = x;

        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
        wrperfmon(4, reg->freq);
        wrperfmon(6, reg->reset_values);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware. Reset the count
   for CTR to the value stored in REG->RESET_VALUES.

   For EV5, this means disabling profiling, reading the current values,
   masking in the value for the desired register, writing, then turning
   profiling back on.

   This can be streamlined if profiling is only enabled for user mode.
   In that case we know that the counters are not currently incrementing
   (due to being in kernel mode). */

static void
ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
        unsigned long values, mask, not_pk, reset_values;

        mask = (ctr == 0 ? 0xfffful << 48
                : ctr == 1 ? 0xfffful << 32
                : 0x3fff << 16);

        not_pk = 1 << 9 | 1 << 8;

        reset_values = reg->reset_values;

        if ((reg->proc_mode & not_pk) == not_pk) {
                values = wrperfmon(5, 0);
                values = (reset_values & mask) | (values & ~mask & -2);
                wrperfmon(6, values);
        } else {
                wrperfmon(0, -1);
                values = wrperfmon(5, 0);
                values = (reset_values & mask) | (values & ~mask & -2);
                wrperfmon(6, values);
                wrperfmon(1, reg->enable);
        }
}

static void
ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
                     struct op_counter_config *ctr)
{
        /* Record the sample. */
        oprofile_add_sample(regs, which);
}


struct op_axp_model op_model_ev5 = {
        .reg_setup = ev5_reg_setup,
        .cpu_setup = ev5_cpu_setup,
        .reset_ctr = ev5_reset_ctr,
        .handle_interrupt = ev5_handle_interrupt,
        .cpu_type = "alpha/ev5",
        .num_counters = 3,
        .can_set_proc_mode = 1,
};

struct op_axp_model op_model_pca56 = {
        .reg_setup = pca56_reg_setup,
        .cpu_setup = ev5_cpu_setup,
        .reset_ctr = ev5_reset_ctr,
        .handle_interrupt = ev5_handle_interrupt,
        .cpu_type = "alpha/pca56",
        .num_counters = 3,
        .can_set_proc_mode = 1,
};
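common_reg_setup() above packs up to three event selections into a single mux_select word; the event numbering (0-1, 24-39, 40-47, 48-63, 64-71) is chosen so that subtracting the range base yields the hardware field value directly, and only the CBOX field offsets differ between EV5 (19/22) and PCA56 (8/11). A sketch of that mapping for one event, mirroring the ranges handled above (the helper name is illustrative):

/* Sketch: map one oprofile event number onto its mux_select bits,
   following the ranges handled in common_reg_setup() above.
   cbox1_ofs/cbox2_ofs are 19/22 on EV5 and 8/11 on PCA56. */
static unsigned long
ev5_event_to_mux(unsigned long event, int cbox1_ofs, int cbox2_ofs)
{
        if (event < 2)                          /* PCSEL0 */
                return event << 31;
        if (event >= 24 && event < 40)          /* PCSEL1 */
                return (event - 24) << 4;
        if (event >= 40 && event < 48)          /* CBOX1, PCSEL1 forced to 15 */
                return (event - 40) << cbox1_ofs | 15 << 4;
        if (event >= 48 && event < 64)          /* PCSEL2 */
                return event - 48;
        if (event >= 64 && event < 72)          /* CBOX2, PCSEL2 forced to 15 */
                return (event - 64) << cbox2_ofs | 15;
        return 0;                               /* out of range: no bits set */
}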
diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c
new file mode 100644
index 000000000000..0869f85f5748
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev6.c
@@ -0,0 +1,103 @@
/**
 * @file arch/alpha/oprofile/op_model_ev6.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling. */

static void
ev6_reg_setup(struct op_register_config *reg,
              struct op_counter_config *ctr,
              struct op_system_config *sys)
{
        unsigned long ctl, reset, need_reset, i;

        /* Select desired events. We've mapped the event numbers
           such that they fit directly into the event selection fields. */
        ctl = 0;
        if (ctr[0].enabled && ctr[0].event)
                ctl |= (ctr[0].event & 1) << 4;
        if (ctr[1].enabled)
                ctl |= (ctr[1].event - 2) & 15;
        reg->mux_select = ctl;

        /* Select logging options. */
        /* ??? Need to come up with some mechanism to trace only
           selected processes. EV6 does not have a mechanism to
           select kernel or user mode only. For now, enable always. */
        reg->proc_mode = 0;

        /* EV6 cannot change the width of the counters as with the
           other implementations. But fortunately, we can write to
           the counters and set the value such that it will overflow
           at the right time. */
        reset = need_reset = 0;
        for (i = 0; i < 2; ++i) {
                unsigned long count = ctr[i].count;
                if (!ctr[i].enabled)
                        continue;

                if (count > 0x100000)
                        count = 0x100000;
                ctr[i].count = count;
                reset |= (0x100000 - count) << (i ? 6 : 28);
                if (count != 0x100000)
                        need_reset |= 1 << i;
        }
        reg->reset_values = reset;
        reg->need_reset = need_reset;
}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev6_cpu_setup (void *x)
{
        struct op_register_config *reg = x;

        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
        wrperfmon(6, reg->reset_values | 3);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware. Reset the count
   for CTR to the value stored in REG->RESET_VALUES. */

static void
ev6_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
        wrperfmon(6, reg->reset_values | (1 << ctr));
}

static void
ev6_handle_interrupt(unsigned long which, struct pt_regs *regs,
                     struct op_counter_config *ctr)
{
        /* Record the sample. */
        oprofile_add_sample(regs, which);
}


struct op_axp_model op_model_ev6 = {
        .reg_setup = ev6_reg_setup,
        .cpu_setup = ev6_cpu_setup,
        .reset_ctr = ev6_reset_ctr,
        .handle_interrupt = ev6_handle_interrupt,
        .cpu_type = "alpha/ev6",
        .num_counters = 2,
        .can_set_proc_mode = 0,
};
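EV6 counters are a fixed 20 bits wide but writable, so ev6_reg_setup() above preloads each enabled counter with 0x100000 - count, which makes it overflow, and interrupt, after exactly count more events; need_reset marks counters that must be reloaded from reset_values on every overflow. A worked sketch of that preload arithmetic (the helper name is illustrative):

/* Sketch: the EV6 preload arithmetic.  A 20-bit counter overflows when it
   reaches 0x100000, so starting it at (0x100000 - count) yields an
   interrupt after `count` events.  Field positions (28 for counter 0,
   6 for counter 1) match the reset_values layout used above. */
static unsigned long
ev6_preload(unsigned long count, int counter)
{
        if (count > 0x100000)
                count = 0x100000;       /* cap at full counter width */
        return (0x100000 - count) << (counter ? 6 : 28);
}

/* Example: count = 100000 (0x186a0) on counter 0 gives a preload of
   0xe7960 placed at bit 28 of reset_values. */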
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c
new file mode 100644
index 000000000000..70302086283c
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev67.c
@@ -0,0 +1,263 @@
/**
 * @file arch/alpha/oprofile/op_model_ev67.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 * @author Falk Hueffner <falk@debian.org>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling. */

static void
ev67_reg_setup(struct op_register_config *reg,
               struct op_counter_config *ctr,
               struct op_system_config *sys)
{
        unsigned long ctl, reset, need_reset, i;

        /* Select desired events. */
        ctl = 1UL << 4;                 /* Enable ProfileMe mode. */

        /* The event numbers are chosen so we can use them directly if
           PCTR1 is enabled. */
        if (ctr[1].enabled) {
                ctl |= (ctr[1].event & 3) << 2;
        } else {
                if (ctr[0].event == 0) /* cycles */
                        ctl |= 1UL << 2;
        }
        reg->mux_select = ctl;

        /* Select logging options. */
        /* ??? Need to come up with some mechanism to trace only
           selected processes. EV67 does not have a mechanism to
           select kernel or user mode only. For now, enable always. */
        reg->proc_mode = 0;

        /* EV67 cannot change the width of the counters as with the
           other implementations. But fortunately, we can write to
           the counters and set the value such that it will overflow
           at the right time. */
        reset = need_reset = 0;
        for (i = 0; i < 2; ++i) {
                unsigned long count = ctr[i].count;
                if (!ctr[i].enabled)
                        continue;

                if (count > 0x100000)
                        count = 0x100000;
                ctr[i].count = count;
                reset |= (0x100000 - count) << (i ? 6 : 28);
                if (count != 0x100000)
                        need_reset |= 1 << i;
        }
        reg->reset_values = reset;
        reg->need_reset = need_reset;
}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev67_cpu_setup (void *x)
{
        struct op_register_config *reg = x;

        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
        wrperfmon(6, reg->reset_values | 3);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware. Reset the count
   for CTR to the value stored in REG->RESET_VALUES. */

static void
ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
        wrperfmon(6, reg->reset_values | (1 << ctr));
}

/* ProfileMe conditions which will show up as counters. We can also
   detect the following, but it seems unlikely that anybody is
   interested in counting them:
    * Reset
    * MT_FPCR (write to floating point control register)
    * Arithmetic trap
    * Dstream Fault
    * Machine Check (ECC fault, etc.)
    * OPCDEC (illegal opcode)
    * Floating point disabled
    * Differentiate between DTB single/double misses and 3 or 4 level
      page tables
    * Istream access violation
    * Interrupt
    * Icache Parity Error.
    * Instruction killed (nop, trapb)

   Unfortunately, there seems to be no way to detect Dcache and Bcache
   misses; the latter could be approximated by making the counter
   count Bcache misses, but that is not precise.

   We model this as 20 counters:
    * PCTR0
    * PCTR1
    * 9 ProfileMe events, induced by PCTR0
    * 9 ProfileMe events, induced by PCTR1
*/

enum profileme_counters {
        PM_STALLED,             /* Stalled for at least one cycle
                                   between the fetch and map stages */
        PM_TAKEN,               /* Conditional branch taken */
        PM_MISPREDICT,          /* Branch caused mispredict trap */
        PM_ITB_MISS,            /* ITB miss */
        PM_DTB_MISS,            /* DTB miss */
        PM_REPLAY,              /* Replay trap */
        PM_LOAD_STORE,          /* Load-store order trap */
        PM_ICACHE_MISS,         /* Icache miss */
        PM_UNALIGNED,           /* Unaligned Load/Store */
        PM_NUM_COUNTERS
};

static inline void
op_add_pm(unsigned long pc, int kern, unsigned long counter,
          struct op_counter_config *ctr, unsigned long event)
{
        unsigned long fake_counter = 2 + event;
        if (counter == 1)
                fake_counter += PM_NUM_COUNTERS;
        if (ctr[fake_counter].enabled)
                oprofile_add_pc(pc, kern, fake_counter);
}

static void
ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
                      struct op_counter_config *ctr)
{
        unsigned long pmpc, pctr_ctl;
        int kern = !user_mode(regs);
        int mispredict = 0;
        union {
                unsigned long v;
                struct {
                        unsigned reserved:    30; /*  0-29 */
                        unsigned overcount:    3; /* 30-32 */
                        unsigned icache_miss:  1; /*    33 */
                        unsigned trap_type:    4; /* 34-37 */
                        unsigned load_store:   1; /*    38 */
                        unsigned trap:         1; /*    39 */
                        unsigned mispredict:   1; /*    40 */
                } fields;
        } i_stat;

        enum trap_types {
                TRAP_REPLAY,
                TRAP_INVALID0,
                TRAP_DTB_DOUBLE_MISS_3,
                TRAP_DTB_DOUBLE_MISS_4,
                TRAP_FP_DISABLED,
                TRAP_UNALIGNED,
                TRAP_DTB_SINGLE_MISS,
                TRAP_DSTREAM_FAULT,
                TRAP_OPCDEC,
                TRAP_INVALID1,
                TRAP_MACHINE_CHECK,
                TRAP_INVALID2,
                TRAP_ARITHMETIC,
                TRAP_INVALID3,
                TRAP_MT_FPCR,
                TRAP_RESET
        };

        pmpc = wrperfmon(9, 0);
        /* ??? Don't know how to handle physical-mode PALcode address. */
        if (pmpc & 1)
                return;
        pmpc &= ~2;             /* clear reserved bit */

        i_stat.v = wrperfmon(8, 0);
        if (i_stat.fields.trap) {
                switch (i_stat.fields.trap_type) {
                case TRAP_INVALID1:
                case TRAP_INVALID2:
                case TRAP_INVALID3:
                        /* Pipeline redirection occurred. PMPC points
                           to PALcode. Recognize ITB miss by PALcode
                           offset address, and get actual PC from
                           EXC_ADDR. */
                        oprofile_add_pc(regs->pc, kern, which);
                        if ((pmpc & ((1 << 15) - 1)) == 581)
                                op_add_pm(regs->pc, kern, which,
                                          ctr, PM_ITB_MISS);
                        /* Most other bit and counter values will be
                           those for the first instruction in the
                           fault handler, so we're done. */
                        return;
                case TRAP_REPLAY:
                        op_add_pm(pmpc, kern, which, ctr,
                                  (i_stat.fields.load_store
                                   ? PM_LOAD_STORE : PM_REPLAY));
                        break;
                case TRAP_DTB_DOUBLE_MISS_3:
                case TRAP_DTB_DOUBLE_MISS_4:
                case TRAP_DTB_SINGLE_MISS:
                        op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
                        break;
                case TRAP_UNALIGNED:
                        op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
                        break;
                case TRAP_INVALID0:
                case TRAP_FP_DISABLED:
                case TRAP_DSTREAM_FAULT:
                case TRAP_OPCDEC:
                case TRAP_MACHINE_CHECK:
                case TRAP_ARITHMETIC:
                case TRAP_MT_FPCR:
                case TRAP_RESET:
                        break;
                }

                /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
                   mispredicts do not set this bit but can be
                   recognized by the presence of one of these
                   instructions at the PMPC location with bit 39
                   set. */
                if (i_stat.fields.mispredict) {
                        mispredict = 1;
                        op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
                }
        }

        oprofile_add_pc(pmpc, kern, which);

        pctr_ctl = wrperfmon(5, 0);
        if (pctr_ctl & (1UL << 27))
                op_add_pm(pmpc, kern, which, ctr, PM_STALLED);

        /* Unfortunately, TAK is undefined on mispredicted branches.
           ??? It is also undefined for non-cbranch insns, should
           check that. */
        if (!mispredict && pctr_ctl & (1UL << 0))
                op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
}

struct op_axp_model op_model_ev67 = {
        .reg_setup = ev67_reg_setup,
        .cpu_setup = ev67_cpu_setup,
        .reset_ctr = ev67_reset_ctr,
        .handle_interrupt = ev67_handle_interrupt,
        .cpu_type = "alpha/ev67",
        .num_counters = 20,
        .can_set_proc_mode = 0,
};
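ev67_handle_interrupt() above fans each ProfileMe interrupt out over 20 logical counters: indices 0 and 1 are the real PCTR0/PCTR1, and every recognized trap condition is folded into a pseudo-counter 2 + event, shifted by PM_NUM_COUNTERS when PCTR1 was the triggering counter, which is why op_model_ev67.num_counters is 20 even though the hardware has only two counters. A sketch of that index mapping, mirroring op_add_pm() above (the helper name is illustrative):

/* Sketch: the logical-counter layout used by op_add_pm() above.
   Indices 0-1 are the real PCTR0/PCTR1; 2-10 are ProfileMe events seen
   while PCTR0 was the active counter; 11-19 are the same events for
   PCTR1.  2 + 9 + 9 = 20 = op_model_ev67.num_counters. */
static unsigned long
ev67_fake_counter(unsigned long which, unsigned long pm_event)
{
        unsigned long fake = 2 + pm_event;      /* pm_event is a profileme_counters value */
        if (which == 1)
                fake += PM_NUM_COUNTERS;        /* shift into the PCTR1 block */
        return fake;
}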