Diffstat (limited to 'arch/x86'):

 -rw-r--r--  arch/x86/kernel/apic_32.c            |   4
 -rw-r--r--  arch/x86/kernel/apic_64.c            |   4
 -rw-r--r--  arch/x86/oprofile/Makefile           |   2
 -rw-r--r--  arch/x86/oprofile/nmi_int.c          |  27
 -rw-r--r--  arch/x86/oprofile/op_model_amd.c     | 543
 -rw-r--r--  arch/x86/oprofile/op_model_athlon.c  | 190
 -rw-r--r--  arch/x86/oprofile/op_model_p4.c      | 175
 -rw-r--r--  arch/x86/oprofile/op_x86_model.h     |   4
 8 files changed, 663 insertions(+), 286 deletions(-)
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index f88bd0d982b0..0ff576d026a4 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -646,6 +646,9 @@ int setup_profiling_timer(unsigned int multiplier)
  *
  * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
  * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ *
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
  */
 
 #define APIC_EILVT_LVTOFF_MCE	0
@@ -669,6 +672,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
 	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
 	return APIC_EILVT_LVTOFF_IBS;
 }
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
 
 /*
  * Local APIC start and shutdown
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 446c062e831c..57744f4a75b4 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -204,6 +204,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
  *
  * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
  * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ *
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
  */
 
 #define APIC_EILVT_LVTOFF_MCE	0
@@ -228,6 +231,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
 	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
 	return APIC_EILVT_LVTOFF_IBS;
 }
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
 
 /*
  * Program the next event, relative to now
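Note on the exports above: EXPORT_SYMBOL_GPL() is what allows the oprofile model code, which can be built as a module, to program the extended interrupt LVT entry for IBS on each CPU. A minimal sketch of the intended call pattern, mirroring the per-CPU helpers that op_model_amd.c introduces further down (the wrapper names here are illustrative, not part of the patch):

	/* runs on each CPU via on_each_cpu(); arms the IBS LVT entry as an NMI */
	static void ibs_lvt_arm(void *arg)
	{
		/* vector 0, NMI delivery, mask=0 so the entry generates interrupts */
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
	}

	/* masks the entry again (mask=1), silencing IBS interrupts */
	static void ibs_lvt_disarm(void *arg)
	{
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
	}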
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 30f3eb366667..446902b2a6b6 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -7,6 +7,6 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 			timer_int.o )
 
 oprofile-y				:= $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_X86_LOCAL_APIC)	+= nmi_int.o op_model_athlon.o \
+oprofile-$(CONFIG_X86_LOCAL_APIC)	+= nmi_int.o op_model_amd.o \
 					   op_model_ppro.o op_model_p4.o
 oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 0227694f7dab..d98857410746 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
 /**
  * @file nmi_int.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2008 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #include <linux/init.h>
@@ -437,6 +438,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	__u8 vendor = boot_cpu_data.x86_vendor;
 	__u8 family = boot_cpu_data.x86;
 	char *cpu_type;
+	int ret = 0;
 
 	if (!cpu_has_apic)
 		return -ENODEV;
@@ -449,19 +451,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	default:
 		return -ENODEV;
 	case 6:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		cpu_type = "i386/athlon";
 		break;
 	case 0xf:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		/* Actually it could be i386/hammer too, but give
 		   user space an consistent name. */
 		cpu_type = "x86-64/hammer";
 		break;
 	case 0x10:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		cpu_type = "x86-64/family10";
 		break;
+	case 0x11:
+		model = &op_amd_spec;
+		cpu_type = "x86-64/family11h";
+		break;
 	}
 	break;
 
@@ -488,17 +494,24 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		return -ENODEV;
 	}
 
-	init_sysfs();
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&oprofile_cpu_nb);
 #endif
-	using_nmi = 1;
+	/* default values, can be overwritten by model */
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
 	ops->shutdown = nmi_shutdown;
 	ops->start = nmi_start;
 	ops->stop = nmi_stop;
 	ops->cpu_type = cpu_type;
+
+	if (model->init)
+		ret = model->init(ops);
+	if (ret)
+		return ret;
+
+	init_sysfs();
+	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
 }
@@ -511,4 +524,6 @@ void op_nmi_exit(void)
 	unregister_cpu_notifier(&oprofile_cpu_nb);
 #endif
 	}
+	if (model->exit)
+		model->exit();
 }
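The init/exit hooks give a model a chance to adjust the default oprofile_operations before init_sysfs() publishes them, and a failing init() now aborts op_nmi_init() cleanly. A condensed sketch of the protocol, assuming a hypothetical op_foo_spec (only the wrapping pattern itself is taken from this patch):

	static int (*arch_create_files)(struct super_block *sb, struct dentry *root);

	static int foo_create_files(struct super_block *sb, struct dentry *root)
	{
		/* keep the generic counter files, then add model-specific ones */
		int ret = arch_create_files ? arch_create_files(sb, root) : 0;
		return ret;
	}

	static int op_foo_init(struct oprofile_operations *ops)
	{
		arch_create_files = ops->create_files;	/* save the default */
		ops->create_files = foo_create_files;	/* and wrap it */
		return 0;		/* non-zero aborts op_nmi_init() */
	}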
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
new file mode 100644
index 000000000000..d9faf607b3a6
--- /dev/null
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -0,0 +1,543 @@
+/*
+ * @file op_model_amd.c
+ * athlon / K7 / K8 / Family 10h model-specific MSR operations
+ *
+ * @remark Copyright 2002-2008 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ * @author Philippe Elie
+ * @author Graydon Hoare
+ * @author Robert Richter <robert.richter@amd.com>
+ * @author Barry Kasindorf
+*/
+
+#include <linux/oprofile.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+#include <asm/nmi.h>
+
+#include "op_x86_model.h"
+#include "op_counter.h"
+
+#define NUM_COUNTERS 4
+#define NUM_CONTROLS 4
+
+#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
+#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+
+#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
+#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
+#define CTRL_CLEAR_LO(x) (x &= (1<<21))
+#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
+#define CTRL_SET_ENABLE(val) (val |= 1<<20)
+#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
+#define CTRL_SET_UM(val, m) (val |= (m << 8))
+#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
+#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
+
51
52#ifdef CONFIG_OPROFILE_IBS
53
54/* IbsFetchCtl bits/masks */
55#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */
56#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */
57#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */
58
59/*IbsOpCtl bits */
60#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
61#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
62
63/* Codes used in cpu_buffer.c */
64/* This produces duplicate code, need to be fixed */
65#define IBS_FETCH_BEGIN 3
66#define IBS_OP_BEGIN 4
67
68/* The function interface needs to be fixed, something like add
69 data. Should then be added to linux/oprofile.h. */
70extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
71 unsigned int * const ibs_sample, u8 code);
72
73struct ibs_fetch_sample {
74 /* MSRC001_1031 IBS Fetch Linear Address Register */
75 unsigned int ibs_fetch_lin_addr_low;
76 unsigned int ibs_fetch_lin_addr_high;
77 /* MSRC001_1030 IBS Fetch Control Register */
78 unsigned int ibs_fetch_ctl_low;
79 unsigned int ibs_fetch_ctl_high;
80 /* MSRC001_1032 IBS Fetch Physical Address Register */
81 unsigned int ibs_fetch_phys_addr_low;
82 unsigned int ibs_fetch_phys_addr_high;
83};
84
85struct ibs_op_sample {
86 /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
87 unsigned int ibs_op_rip_low;
88 unsigned int ibs_op_rip_high;
89 /* MSRC001_1035 IBS Op Data Register */
90 unsigned int ibs_op_data1_low;
91 unsigned int ibs_op_data1_high;
92 /* MSRC001_1036 IBS Op Data 2 Register */
93 unsigned int ibs_op_data2_low;
94 unsigned int ibs_op_data2_high;
95 /* MSRC001_1037 IBS Op Data 3 Register */
96 unsigned int ibs_op_data3_low;
97 unsigned int ibs_op_data3_high;
98 /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
99 unsigned int ibs_dc_linear_low;
100 unsigned int ibs_dc_linear_high;
101 /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
102 unsigned int ibs_dc_phys_low;
103 unsigned int ibs_dc_phys_high;
104};
105
106/*
107 * unitialize the APIC for the IBS interrupts if needed on AMD Family10h+
108*/
109static void clear_ibs_nmi(void);
110
111static int ibs_allowed; /* AMD Family10h and later */
112
113struct op_ibs_config {
114 unsigned long op_enabled;
115 unsigned long fetch_enabled;
116 unsigned long max_cnt_fetch;
117 unsigned long max_cnt_op;
118 unsigned long rand_en;
119 unsigned long dispatched_ops;
120};
121
122static struct op_ibs_config ibs_config;
123
124#endif
125
+/* functions for op_amd_spec */
+
+static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; i++) {
+		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+		else
+			msrs->counters[i].addr = 0;
+	}
+
+	for (i = 0; i < NUM_CONTROLS; i++) {
+		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
+			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+		else
+			msrs->controls[i].addr = 0;
+	}
+}
+
+
+static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	/* clear all counters */
+	for (i = 0 ; i < NUM_CONTROLS; ++i) {
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+			continue;
+		CTRL_READ(low, high, msrs, i);
+		CTRL_CLEAR_LO(low);
+		CTRL_CLEAR_HI(high);
+		CTRL_WRITE(low, high, msrs, i);
+	}
+
+	/* avoid a false detection of ctr overflows in NMI handler */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+			continue;
+		CTR_WRITE(1, msrs, i);
+	}
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+			reset_value[i] = counter_config[i].count;
+
+			CTR_WRITE(counter_config[i].count, msrs, i);
+
+			CTRL_READ(low, high, msrs, i);
+			CTRL_CLEAR_LO(low);
+			CTRL_CLEAR_HI(high);
+			CTRL_SET_ENABLE(low);
+			CTRL_SET_USR(low, counter_config[i].user);
+			CTRL_SET_KERN(low, counter_config[i].kernel);
+			CTRL_SET_UM(low, counter_config[i].unit_mask);
+			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
+			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
+			CTRL_SET_HOST_ONLY(high, 0);
+			CTRL_SET_GUEST_ONLY(high, 0);
+
+			CTRL_WRITE(low, high, msrs, i);
+		} else {
+			reset_value[i] = 0;
+		}
+	}
+}
+
+#ifdef CONFIG_OPROFILE_IBS
+
+static inline int
+op_amd_handle_ibs(struct pt_regs * const regs,
+		  struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	struct ibs_fetch_sample ibs_fetch;
+	struct ibs_op_sample ibs_op;
+
+	if (!ibs_allowed)
+		return 1;
+
+	if (ibs_config.fetch_enabled) {
+		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+		if (high & IBS_FETCH_HIGH_VALID_BIT) {
+			ibs_fetch.ibs_fetch_ctl_high = high;
+			ibs_fetch.ibs_fetch_ctl_low = low;
+			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
+			ibs_fetch.ibs_fetch_lin_addr_high = high;
+			ibs_fetch.ibs_fetch_lin_addr_low = low;
+			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
+			ibs_fetch.ibs_fetch_phys_addr_high = high;
+			ibs_fetch.ibs_fetch_phys_addr_low = low;
+
+			oprofile_add_ibs_sample(regs,
+						(unsigned int *)&ibs_fetch,
+						IBS_FETCH_BEGIN);
+
+			/*reenable the IRQ */
+			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+			high &= ~IBS_FETCH_HIGH_VALID_BIT;
+			high |= IBS_FETCH_HIGH_ENABLE;
+			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
+			wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+		}
+	}
+
+	if (ibs_config.op_enabled) {
+		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
+		if (low & IBS_OP_LOW_VALID_BIT) {
+			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
+			ibs_op.ibs_op_rip_low = low;
+			ibs_op.ibs_op_rip_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
+			ibs_op.ibs_op_data1_low = low;
+			ibs_op.ibs_op_data1_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
+			ibs_op.ibs_op_data2_low = low;
+			ibs_op.ibs_op_data2_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
+			ibs_op.ibs_op_data3_low = low;
+			ibs_op.ibs_op_data3_high = high;
+			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
+			ibs_op.ibs_dc_linear_low = low;
+			ibs_op.ibs_dc_linear_high = high;
+			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
+			ibs_op.ibs_dc_phys_low = low;
+			ibs_op.ibs_dc_phys_high = high;
+
+			/* reenable the IRQ */
+			oprofile_add_ibs_sample(regs,
+						(unsigned int *)&ibs_op,
+						IBS_OP_BEGIN);
+			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
+			high = 0;
+			low &= ~IBS_OP_LOW_VALID_BIT;
+			low |= IBS_OP_LOW_ENABLE;
+			wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+		}
+	}
+
+	return 1;
+}
+
+#endif
+
+static int op_amd_check_ctrs(struct pt_regs * const regs,
+			     struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	for (i = 0 ; i < NUM_COUNTERS; ++i) {
+		if (!reset_value[i])
+			continue;
+		CTR_READ(low, high, msrs, i);
+		if (CTR_OVERFLOWED(low)) {
+			oprofile_add_sample(regs, i);
+			CTR_WRITE(reset_value[i], msrs, i);
+		}
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	op_amd_handle_ibs(regs, msrs);
+#endif
+
+	/* See op_model_ppro.c */
+	return 1;
+}
+
+static void op_amd_start(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+		if (reset_value[i]) {
+			CTRL_READ(low, high, msrs, i);
+			CTRL_SET_ACTIVE(low);
+			CTRL_WRITE(low, high, msrs, i);
+		}
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	if (ibs_allowed && ibs_config.fetch_enabled) {
+		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+		high = IBS_FETCH_HIGH_ENABLE;
+		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+	}
+
+	if (ibs_allowed && ibs_config.op_enabled) {
+		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
+		high = 0;
+		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+	}
+#endif
+}
+
+
+static void op_amd_stop(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	/* Subtle: stop on all counters to avoid race with
+	 * setting our pm callback */
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+		if (!reset_value[i])
+			continue;
+		CTRL_READ(low, high, msrs, i);
+		CTRL_SET_INACTIVE(low);
+		CTRL_WRITE(low, high, msrs, i);
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	if (ibs_allowed && ibs_config.fetch_enabled) {
+		low = 0;	/* clear max count and enable */
+		high = 0;
+		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+	}
+
+	if (ibs_allowed && ibs_config.op_enabled) {
+		low = 0;	/* clear max count and enable */
+		high = 0;
+		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+	}
+#endif
+}
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
+
+	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+		if (CTR_IS_RESERVED(msrs, i))
+			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+	}
+	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
+		if (CTRL_IS_RESERVED(msrs, i))
+			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+}
+
+#ifndef CONFIG_OPROFILE_IBS
+
+/* no IBS support */
+
+static int op_amd_init(struct oprofile_operations *ops)
+{
+	return 0;
+}
+
+static void op_amd_exit(void) {}
+
+#else
+
+static u8 ibs_eilvt_off;
+
+static inline void apic_init_ibs_nmi_per_cpu(void *arg)
+{
+	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
+{
+	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+}
+
+static int pfm_amd64_setup_eilvt(void)
+{
+#define IBSCTL_LVTOFFSETVAL		(1 << 8)
+#define IBSCTL				0x1cc
+	struct pci_dev *cpu_cfg;
+	int nodes;
+	u32 value = 0;
+
+	/* per CPU setup */
+	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+
+	nodes = 0;
+	cpu_cfg = NULL;
+	do {
+		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
+					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
+					 cpu_cfg);
+		if (!cpu_cfg)
+			break;
+		++nodes;
+		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
+				       | IBSCTL_LVTOFFSETVAL);
+		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
+		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
+				"IBSCTL = 0x%08x", value);
+			return 1;
+		}
+	} while (1);
+
+	if (!nodes) {
+		printk(KERN_DEBUG "No CPU node configured for IBS");
+		return 1;
+	}
+
+#ifdef CONFIG_NUMA
+	/* Sanity check */
+	/* Works only for 64bit with proper numa implementation. */
+	if (nodes != num_possible_nodes()) {
+		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
+			"found: %d, expected %d",
+			nodes, num_possible_nodes());
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * initialize the APIC for the IBS interrupts
+ * if available (AMD Family10h rev B0 and later)
+ */
+static void setup_ibs(void)
+{
+	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+
+	if (!ibs_allowed)
+		return;
+
+	if (pfm_amd64_setup_eilvt()) {
+		ibs_allowed = 0;
+		return;
+	}
+
+	printk(KERN_INFO "oprofile: AMD IBS detected\n");
+}
+
+
+/*
+ * unitialize the APIC for the IBS interrupts if needed on AMD Family10h
+ * rev B0 and later */
+static void clear_ibs_nmi(void)
+{
+	if (ibs_allowed)
+		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
+}
+
+static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
+
+static int setup_ibs_files(struct super_block * sb, struct dentry * root)
+{
+	char buf[12];
+	struct dentry *dir;
+	int ret = 0;
+
+	/* architecture specific files */
+	if (create_arch_files)
+		ret = create_arch_files(sb, root);
+
+	if (ret)
+		return ret;
+
+	if (!ibs_allowed)
+		return ret;
+
+	/* model specific files */
+
+	/* setup some reasonable defaults */
+	ibs_config.max_cnt_fetch = 250000;
+	ibs_config.fetch_enabled = 0;
+	ibs_config.max_cnt_op = 250000;
+	ibs_config.op_enabled = 0;
+	ibs_config.dispatched_ops = 1;
+	snprintf(buf, sizeof(buf), "ibs_fetch");
+	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "rand_enable",
+				&ibs_config.rand_en);
+	oprofilefs_create_ulong(sb, dir, "enable",
+				&ibs_config.fetch_enabled);
+	oprofilefs_create_ulong(sb, dir, "max_count",
+				&ibs_config.max_cnt_fetch);
+	snprintf(buf, sizeof(buf), "ibs_uops");
+	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "enable",
+				&ibs_config.op_enabled);
+	oprofilefs_create_ulong(sb, dir, "max_count",
+				&ibs_config.max_cnt_op);
+	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+				&ibs_config.dispatched_ops);
+
+	return 0;
+}
+
+static int op_amd_init(struct oprofile_operations *ops)
+{
+	setup_ibs();
+	create_arch_files = ops->create_files;
+	ops->create_files = setup_ibs_files;
+	return 0;
+}
+
+static void op_amd_exit(void)
+{
+	clear_ibs_nmi();
+}
+
+#endif
+
+struct op_x86_model_spec const op_amd_spec = {
+	.init = op_amd_init,
+	.exit = op_amd_exit,
+	.num_counters = NUM_COUNTERS,
+	.num_controls = NUM_CONTROLS,
+	.fill_in_addresses = &op_amd_fill_in_addresses,
+	.setup_ctrs = &op_amd_setup_ctrs,
+	.check_ctrs = &op_amd_check_ctrs,
+	.start = &op_amd_start,
+	.stop = &op_amd_stop,
+	.shutdown = &op_amd_shutdown
+};
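The heart of the IBS NMI path above is the valid/enable handshake on the control MSRs: hardware sets the valid bit when a sample is complete and then stops; the handler must clear the valid bit, keep only the MaxCnt field, and set the enable bit again, or sampling ends after the first event. A condensed view of the fetch-side rearm sequence from op_amd_handle_ibs(), using the same MSR and bit names, with error handling omitted (the helper name is illustrative):

	static void ibs_fetch_rearm(void)
	{
		unsigned int low, high;

		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
		high &= ~IBS_FETCH_HIGH_VALID_BIT;	/* acknowledge the sample */
		high |= IBS_FETCH_HIGH_ENABLE;		/* start the next one */
		low &= IBS_FETCH_LOW_MAX_CNT_MASK;	/* preserve only MaxCnt */
		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
	}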
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
deleted file mode 100644
index 3d534879a9dc..000000000000
--- a/arch/x86/oprofile/op_model_athlon.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * @file op_model_athlon.h
- * athlon / K7 / K8 / Family 10h model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- */
-
-#include <linux/oprofile.h>
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/nmi.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
-
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR_LO(x) (x &= (1<<21))
-#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
-#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
-#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
-
-static unsigned long reset_value[NUM_COUNTERS];
-
-static void athlon_fill_in_addresses(struct op_msrs * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; i++) {
-		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-		else
-			msrs->counters[i].addr = 0;
-	}
-
-	for (i = 0; i < NUM_CONTROLS; i++) {
-		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-		else
-			msrs->controls[i].addr = 0;
-	}
-}
-
-
-static void athlon_setup_ctrs(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
-			continue;
-		CTRL_READ(low, high, msrs, i);
-		CTRL_CLEAR_LO(low);
-		CTRL_CLEAR_HI(high);
-		CTRL_WRITE(low, high, msrs, i);
-	}
-
-	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
-			continue;
-		CTR_WRITE(1, msrs, i);
-	}
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
-			reset_value[i] = counter_config[i].count;
-
-			CTR_WRITE(counter_config[i].count, msrs, i);
-
-			CTRL_READ(low, high, msrs, i);
-			CTRL_CLEAR_LO(low);
-			CTRL_CLEAR_HI(high);
-			CTRL_SET_ENABLE(low);
-			CTRL_SET_USR(low, counter_config[i].user);
-			CTRL_SET_KERN(low, counter_config[i].kernel);
-			CTRL_SET_UM(low, counter_config[i].unit_mask);
-			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
-			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
-			CTRL_SET_HOST_ONLY(high, 0);
-			CTRL_SET_GUEST_ONLY(high, 0);
-
-			CTRL_WRITE(low, high, msrs, i);
-		} else {
-			reset_value[i] = 0;
-		}
-	}
-}
-
-
-static int athlon_check_ctrs(struct pt_regs * const regs,
-			     struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
-		if (!reset_value[i])
-			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
-			oprofile_add_sample(regs, i);
-			CTR_WRITE(reset_value[i], msrs, i);
-		}
-	}
-
-	/* See op_model_ppro.c */
-	return 1;
-}
-
-
-static void athlon_start(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (reset_value[i]) {
-			CTRL_READ(low, high, msrs, i);
-			CTRL_SET_ACTIVE(low);
-			CTRL_WRITE(low, high, msrs, i);
-		}
-	}
-}
-
-
-static void athlon_stop(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	/* Subtle: stop on all counters to avoid race with
-	 * setting our pm callback */
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (!reset_value[i])
-			continue;
-		CTRL_READ(low, high, msrs, i);
-		CTRL_SET_INACTIVE(low);
-		CTRL_WRITE(low, high, msrs, i);
-	}
-}
-
-static void athlon_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (CTR_IS_RESERVED(msrs, i))
-			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
-		if (CTRL_IS_RESERVED(msrs, i))
-			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-}
-
-struct op_x86_model_spec const op_athlon_spec = {
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
-	.fill_in_addresses = &athlon_fill_in_addresses,
-	.setup_ctrs = &athlon_setup_ctrs,
-	.check_ctrs = &athlon_check_ctrs,
-	.start = &athlon_start,
-	.stop = &athlon_stop,
-	.shutdown = &athlon_shutdown
-};
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 56b4757a1f47..43ac5af338d8 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -10,11 +10,12 @@
 
 #include <linux/oprofile.h>
 #include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/nmi.h>
 #include <asm/msr.h>
-#include <asm/ptrace.h>
 #include <asm/fixmap.h>
 #include <asm/apic.h>
-#include <asm/nmi.h>
+
 
 #include "op_x86_model.h"
 #include "op_counter.h"
@@ -40,7 +41,7 @@ static unsigned int num_controls = NUM_CONTROLS_NON_HT;
 static inline void setup_num_counters(void)
 {
 #ifdef CONFIG_SMP
-	if (smp_num_siblings == 2){
+	if (smp_num_siblings == 2) {
 		num_counters = NUM_COUNTERS_HT2;
 		num_controls = NUM_CONTROLS_HT2;
 	}
@@ -86,7 +87,7 @@ struct p4_event_binding {
 #define CTR_FLAME_2 (1 << 6)
 #define CTR_IQ_5 (1 << 7)
 
-static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
+static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
 	{ CTR_BPU_0,   MSR_P4_BPU_PERFCTR0,   MSR_P4_BPU_CCCR0 },
 	{ CTR_MS_0,    MSR_P4_MS_PERFCTR0,    MSR_P4_MS_CCCR0 },
 	{ CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
@@ -97,32 +98,32 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
 	{ CTR_IQ_5,    MSR_P4_IQ_PERFCTR5,    MSR_P4_IQ_CCCR5 }
 };
 
-#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT
+#define NUM_UNUSED_CCCRS	(NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
 
 /* p4 event codes in libop/op_event.h are indices into this table. */
 
 static struct p4_event_binding p4_events[NUM_EVENTS] = {
 
 	{ /* BRANCH_RETIRED */
 		0x05, 0x06,
 		{ {CTR_IQ_4, MSR_P4_CRU_ESCR2},
 		  {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
 	},
 
 	{ /* MISPRED_BRANCH_RETIRED */
 		0x04, 0x03,
 		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
 		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
 	},
 
 	{ /* TC_DELIVER_MODE */
 		0x01, 0x01,
 		{ { CTR_MS_0, MSR_P4_TC_ESCR0},
 		  { CTR_MS_2, MSR_P4_TC_ESCR1} }
 	},
 
 	{ /* BPU_FETCH_REQUEST */
 		0x00, 0x03,
 		{ { CTR_BPU_0, MSR_P4_BPU_ESCR0},
 		  { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
 	},
@@ -146,7 +147,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 	},
 
 	{ /* LOAD_PORT_REPLAY */
 		0x02, 0x04,
 		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
 	},
@@ -170,43 +171,43 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 	},
 
 	{ /* BSQ_CACHE_REFERENCE */
 		0x07, 0x0c,
 		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},
 		  { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
 	},
 
 	{ /* IOQ_ALLOCATION */
 		0x06, 0x03,
 		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
 		  { 0, 0 } }
 	},
 
 	{ /* IOQ_ACTIVE_ENTRIES */
 		0x06, 0x1a,
 		{ { CTR_BPU_2, MSR_P4_FSB_ESCR1},
 		  { 0, 0 } }
 	},
 
 	{ /* FSB_DATA_ACTIVITY */
 		0x06, 0x17,
 		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
 		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
 	},
 
 	{ /* BSQ_ALLOCATION */
 		0x07, 0x05,
 		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},
 		  { 0, 0 } }
 	},
 
 	{ /* BSQ_ACTIVE_ENTRIES */
 		0x07, 0x06,
 		{ { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
 		  { 0, 0 } }
 	},
 
 	{ /* X87_ASSIST */
 		0x05, 0x03,
 		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
 		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
 	},
@@ -216,21 +217,21 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* PACKED_SP_UOP */
 		0x01, 0x08,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* PACKED_DP_UOP */
 		0x01, 0x0c,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* SCALAR_SP_UOP */
 		0x01, 0x0a,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
@@ -242,31 +243,31 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 	},
 
 	{ /* 64BIT_MMX_UOP */
 		0x01, 0x02,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* 128BIT_MMX_UOP */
 		0x01, 0x1a,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* X87_FP_UOP */
 		0x01, 0x04,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* X87_SIMD_MOVES_UOP */
 		0x01, 0x2e,
 		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
 		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
 	},
 
 	{ /* MACHINE_CLEAR */
 		0x05, 0x02,
 		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
 		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
 	},
@@ -276,9 +277,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
 		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
 	},
 
 	{ /* TC_MS_XFER */
 		0x00, 0x05,
 		{ { CTR_MS_0, MSR_P4_MS_ESCR0},
 		  { CTR_MS_2, MSR_P4_MS_ESCR1} }
 	},
@@ -308,7 +309,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 	},
 
 	{ /* INSTR_RETIRED */
 		0x04, 0x02,
 		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
 		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
 	},
@@ -319,14 +320,14 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
 	},
 
 	{ /* UOP_TYPE */
 		0x02, 0x02,
 		{ { CTR_IQ_4, MSR_P4_RAT_ESCR0},
 		  { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
 	},
 
 	{ /* RETIRED_MISPRED_BRANCH_TYPE */
 		0x02, 0x05,
 		{ { CTR_MS_0, MSR_P4_TBPU_ESCR0},
 		  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
 	},
@@ -349,8 +350,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
 #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
 #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
-#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
+#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
+#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
 
 #define CCCR_RESERVED_BITS 0x38030FFF
 #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -360,15 +361,15 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
 #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
 #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
-#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
+#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
+#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
 #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
 #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
 
-#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0)
-#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0)
+#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
+#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
 #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
 
 
@@ -380,7 +381,7 @@ static unsigned int get_stagger(void)
 #ifdef CONFIG_SMP
 	int cpu = smp_processor_id();
 	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif
 	return 0;
 }
 
@@ -395,25 +396,23 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT];
 
 static void p4_fill_in_addresses(struct op_msrs * const msrs)
 {
 	unsigned int i;
 	unsigned int addr, cccraddr, stag;
 
 	setup_num_counters();
 	stag = get_stagger();
 
 	/* initialize some registers */
-	for (i = 0; i < num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i)
 		msrs->counters[i].addr = 0;
-	}
-	for (i = 0; i < num_controls; ++i) {
+	for (i = 0; i < num_controls; ++i)
 		msrs->controls[i].addr = 0;
-	}
-
+
 	/* the counter & cccr registers we pay attention to */
 	for (i = 0; i < num_counters; ++i) {
 		addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
 		cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
-		if (reserve_perfctr_nmi(addr)){
+		if (reserve_perfctr_nmi(addr)) {
 			msrs->counters[i].addr = addr;
 			msrs->controls[i].addr = cccraddr;
 		}
@@ -447,22 +446,22 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
 		if (reserve_evntsel_nmi(addr))
 			msrs->controls[i].addr = addr;
 	}
 
 	for (addr = MSR_P4_MS_ESCR0 + stag;
 	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
 		if (reserve_evntsel_nmi(addr))
 			msrs->controls[i].addr = addr;
 	}
 
 	for (addr = MSR_P4_IX_ESCR0 + stag;
 	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
 		if (reserve_evntsel_nmi(addr))
 			msrs->controls[i].addr = addr;
 	}
 
 	/* there are 2 remaining non-contiguously located ESCRs */
 
 	if (num_counters == NUM_COUNTERS_NON_HT) {
 		/* standard non-HT CPUs handle both remaining ESCRs*/
 		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
 			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
@@ -498,20 +497,20 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
 	unsigned int stag;
 
 	stag = get_stagger();
 
 	/* convert from counter *number* to counter *bit* */
 	counter_bit = 1 << VIRT_CTR(stag, ctr);
 
 	/* find our event binding structure. */
 	if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
 		printk(KERN_ERR
 		       "oprofile: P4 event code 0x%lx out of range\n",
 		       counter_config[ctr].event);
 		return;
 	}
 
 	ev = &(p4_events[counter_config[ctr].event - 1]);
 
 	for (i = 0; i < maxbind; i++) {
 		if (ev->bindings[i].virt_counter & counter_bit) {
 
@@ -526,25 +525,24 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
 				ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
 			}
 			ESCR_SET_EVENT_SELECT(escr, ev->event_select);
 			ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
 			ESCR_WRITE(escr, high, ev, i);
 
 			/* modify CCCR */
 			CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
 			CCCR_CLEAR(cccr);
 			CCCR_SET_REQUIRED_BITS(cccr);
 			CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
-			if (stag == 0) {
+			if (stag == 0)
 				CCCR_SET_PMI_OVF_0(cccr);
-			} else {
+			else
 				CCCR_SET_PMI_OVF_1(cccr);
-			}
 			CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
 			return;
 		}
 	}
 
 	printk(KERN_ERR
 	       "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
 	       counter_config[ctr].event, stag, ctr);
 }
@@ -559,14 +557,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
 	stag = get_stagger();
 
 	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
-	if (! MISC_PMC_ENABLED_P(low)) {
+	if (!MISC_PMC_ENABLED_P(low)) {
 		printk(KERN_ERR "oprofile: P4 PMC not available\n");
 		return;
 	}
 
 	/* clear the cccrs we will use */
 	for (i = 0 ; i < num_counters ; i++) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
 		CCCR_CLEAR(low);
@@ -576,14 +574,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
 
 	/* clear all escrs (including those outside our concern) */
 	for (i = num_counters; i < num_controls; i++) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		wrmsr(msrs->controls[i].addr, 0, 0);
 	}
 
 	/* setup all counters */
 	for (i = 0 ; i < num_counters ; ++i) {
-		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) {
+		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
 			pmc_setup_one_p4_counter(i);
 			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
@@ -603,11 +601,11 @@ static int p4_check_ctrs(struct pt_regs * const regs,
 	stag = get_stagger();
 
 	for (i = 0; i < num_counters; ++i) {
 
 		if (!reset_value[i])
 			continue;
 
 		/*
 		 * there is some eccentricity in the hardware which
 		 * requires that we perform 2 extra corrections:
 		 *
@@ -616,24 +614,24 @@ static int p4_check_ctrs(struct pt_regs * const regs,
 		 *
 		 * - write the counter back twice to ensure it gets
 		 *   updated properly.
 		 *
 		 * the former seems to be related to extra NMIs happening
 		 * during the current NMI; the latter is reported as errata
 		 * N15 in intel doc 249199-029, pentium 4 specification
 		 * update, though their suggested work-around does not
 		 * appear to solve the problem.
 		 */
 
 		real = VIRT_CTR(stag, i);
 
 		CCCR_READ(low, high, real);
 		CTR_READ(ctr, high, real);
 		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
 			oprofile_add_sample(regs, i);
 			CTR_WRITE(reset_value[i], real);
 			CCCR_CLEAR_OVF(low);
 			CCCR_WRITE(low, high, real);
 			CTR_WRITE(reset_value[i], real);
 		}
 	}
 
@@ -683,15 +681,16 @@ static void p4_shutdown(struct op_msrs const * const msrs)
 	int i;
 
 	for (i = 0 ; i < num_counters ; ++i) {
-		if (CTR_IS_RESERVED(msrs,i))
+		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(msrs->counters[i].addr);
 	}
-	/* some of the control registers are specially reserved in
+	/*
+	 * some of the control registers are specially reserved in
 	 * conjunction with the counter registers (hence the starting offset).
 	 * This saves a few bits.
 	 */
 	for (i = num_counters ; i < num_controls ; ++i) {
-		if (CTRL_IS_RESERVED(msrs,i))
+		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(msrs->controls[i].addr);
 	}
 }
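Both the AMD and P4 models arm a performance counter by writing the negated sample count: the counter increments toward zero and the PMU raises the NMI on overflow, so the overflow test is simply "bit 31 is clear". A sketch of the convention behind CTR_WRITE()/CTR_OVERFLOW_P() above (helper names are illustrative):

	static inline void arm_counter(unsigned int msr, unsigned long count)
	{
		/* -count in the low half, all ones above, so the counter
		 * overflows after exactly "count" events */
		wrmsr(msr, -(u32)count, -1);
	}

	static inline int counter_overflowed(u32 lo)
	{
		/* still negative (bit 31 set) means not yet wrapped to zero */
		return !(lo & 0x80000000);
	}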
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 45b605fa71d0..05a0261ba0c3 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -32,6 +32,8 @@ struct pt_regs;
  * various x86 CPU models' perfctr support.
  */
 struct op_x86_model_spec {
+	int (*init)(struct oprofile_operations *ops);
+	void (*exit)(void);
 	unsigned int const num_counters;
 	unsigned int const num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);
@@ -46,6 +48,6 @@ struct op_x86_model_spec {
 extern struct op_x86_model_spec const op_ppro_spec;
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
-extern struct op_x86_model_spec const op_athlon_spec;
+extern struct op_x86_model_spec const op_amd_spec;
 
 #endif /* OP_X86_MODEL_H */
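The two new members are optional: nmi_int.c invokes them only when non-NULL, so models that need no model-wide setup compile unchanged. A sketch of a spec that opts out of both hooks (hypothetical op_bar_spec; the callback functions are assumed to exist elsewhere):

	struct op_x86_model_spec const op_bar_spec = {
		/* .init and .exit left NULL: nothing model-specific to do */
		.num_counters = 2,
		.num_controls = 2,
		.fill_in_addresses = &bar_fill_in_addresses,
		.setup_ctrs = &bar_setup_ctrs,
		.check_ctrs = &bar_check_ctrs,
		.start = &bar_start,
		.stop = &bar_stop,
		.shutdown = &bar_shutdown
	};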