Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/apic_32.c            |   4
-rw-r--r--  arch/x86/kernel/apic_64.c            |   4
-rw-r--r--  arch/x86/oprofile/Makefile           |   2
-rw-r--r--  arch/x86/oprofile/nmi_int.c          | 127
-rw-r--r--  arch/x86/oprofile/op_counter.h       |   3
-rw-r--r--  arch/x86/oprofile/op_model_amd.c     | 559
-rw-r--r--  arch/x86/oprofile/op_model_athlon.c  | 190
-rw-r--r--  arch/x86/oprofile/op_model_p4.c      |   4
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c    |   2
-rw-r--r--  arch/x86/oprofile/op_x86_model.h     |   7
10 files changed, 695 insertions, 207 deletions
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index f88bd0d982b0..0ff576d026a4 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -646,6 +646,9 @@ int setup_profiling_timer(unsigned int multiplier)
  *
  * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
  * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ *
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
  */
 
 #define APIC_EILVT_LVTOFF_MCE 0
@@ -669,6 +672,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
 	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
 	return APIC_EILVT_LVTOFF_IBS;
 }
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
 
 /*
  * Local APIC start and shutdown
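
[Editor's note] The comment added above pins down the mask semantics of the extended LVT entries. As a rough user-space illustration of how such an entry is composed before being written to the APIC's extended-LVT area — the field layout (vector in bits 0-7, message type in bits 8-10, mask in bit 16) and the APIC_EILVT_MSG_NMI value follow the kernel's APIC helpers as I understand them, so treat them as assumptions of this sketch:

/*
 * Sketch only: compose an extended-LVT entry value the way
 * setup_APIC_eilvt() is believed to, and show the effect of the mask
 * bit. Bit positions are assumptions based on the kernel's APIC
 * helpers and the BKDG, not authoritative definitions.
 */
#include <stdio.h>

#define APIC_EILVT_MSG_NMI 0x4         /* deliver as NMI (assumed value) */
#define APIC_LVT_MASKED    (1u << 16)  /* mask=1: no interrupts generated */

static unsigned int eilvt_entry(unsigned char vector,
                                unsigned int msg_type,
                                unsigned int mask)
{
        /* vector in bits 0-7, message type in bits 8-10, mask in bit 16 */
        return vector | (msg_type << 8) | (mask ? APIC_LVT_MASKED : 0);
}

int main(void)
{
        /* mask=0 enables the vector, as the new comment describes */
        printf("IBS NMI entry (enabled): 0x%08x\n",
               eilvt_entry(0, APIC_EILVT_MSG_NMI, 0));
        printf("IBS NMI entry (masked):  0x%08x\n",
               eilvt_entry(0, APIC_EILVT_MSG_NMI, 1));
        return 0;
}

The EXPORT_SYMBOL_GPL above makes the helper callable from the oprofile module, which uses it later in this patch to route IBS interrupts through the NMI path.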
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 446c062e831c..57744f4a75b4 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -204,6 +204,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
  *
  * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
  * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ *
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
  */
 
 #define APIC_EILVT_LVTOFF_MCE 0
@@ -228,6 +231,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
 	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
 	return APIC_EILVT_LVTOFF_IBS;
 }
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
 
 /*
  * Program the next event, relative to now
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 30f3eb366667..446902b2a6b6 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -7,6 +7,6 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 	timer_int.o )
 
 oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
+oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
 				     op_model_ppro.o op_model_p4.o
 oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 3f90289410e6..fb4902bc6f14 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
 /**
  * @file nmi_int.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2008 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #include <linux/init.h>
@@ -22,12 +23,18 @@
 #include "op_counter.h"
 #include "op_x86_model.h"
 
+DEFINE_PER_CPU(int, switch_index);
+
 static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 static int nmi_start(void);
 static void nmi_stop(void);
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);
+static void nmi_cpu_stop(void *dummy);
+static void nmi_cpu_start(void *dummy);
 
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
@@ -80,6 +87,47 @@ static void exit_sysfs(void)
 #define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
+static void nmi_cpu_switch(void *dummy)
+{
+	int cpu = smp_processor_id();
+	int si = per_cpu(switch_index, cpu);
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
+	nmi_cpu_stop(NULL);
+	nmi_cpu_save_mpx_registers(msrs);
+
+	/* move to next set */
+	si += model->num_hardware_counters;
+	if ((si > model->num_counters) || (counter_config[si].count == 0))
+		per_cpu(switch_index, smp_processor_id()) = 0;
+	else
+		per_cpu(switch_index, smp_processor_id()) = si;
+
+	nmi_cpu_restore_mpx_registers(msrs);
+	model->setup_ctrs(msrs);
+	nmi_cpu_start(NULL);
+}
+
+/*
+ * Quick check to see if multiplexing is necessary.
+ * The check should be sufficient since counters are used
+ * in order.
+ */
+static int nmi_multiplex_on(void)
+{
+	return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
+}
+
+static int nmi_switch_event(void)
+{
+	if (nmi_multiplex_on() < 0)
+		return -EINVAL;
+
+	on_each_cpu(nmi_cpu_switch, NULL, 1);
+
+	return 0;
+}
+
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -143,11 +191,10 @@ static void free_msrs(void)
 
 static int allocate_msrs(void)
 {
-	int success = 1;
+	int i, success = 1;
 	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
-	int i;
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
 							GFP_KERNEL);
@@ -155,8 +202,8 @@ static int allocate_msrs(void)
 			success = 0;
 			break;
 		}
-		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
-							GFP_KERNEL);
+		per_cpu(cpu_msrs, i).controls =
+			kmalloc(controls_size, GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).controls) {
 			success = 0;
 			break;
@@ -200,7 +247,8 @@ static int nmi_setup(void)
 		return err;
 	}
 
-	/* We need to serialize save and setup for HT because the subset
+	/*
+	 * We need to serialize save and setup for HT because the subset
 	 * of msrs are distinct for save and setup operations
 	 */
 
@@ -216,7 +264,6 @@ static int nmi_setup(void)
 			       per_cpu(cpu_msrs, 0).controls,
 			       sizeof(struct op_msr) * model->num_controls);
 		}
-
 	}
 	on_each_cpu(nmi_save_registers, NULL, 1);
 	on_each_cpu(nmi_cpu_setup, NULL, 1);
@@ -224,7 +271,41 @@ static int nmi_setup(void)
 	return 0;
 }
 
-static void nmi_restore_registers(struct op_msrs *msrs)
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	unsigned int const nr_ctrs = model->num_hardware_counters;
+	struct op_msr *counters = &msrs->counters[si];
+	unsigned int i;
+
+	for (i = 0; i < nr_ctrs; ++i) {
+		int offset = i + si;
+		if (counters[offset].addr) {
+			rdmsr(counters[offset].addr,
+			      counters[offset].multiplex.low,
+			      counters[offset].multiplex.high);
+		}
+	}
+}
+
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	unsigned int const nr_ctrs = model->num_hardware_counters;
+	struct op_msr *counters = &msrs->counters[si];
+	unsigned int i;
+
+	for (i = 0; i < nr_ctrs; ++i) {
+		int offset = i + si;
+		if (counters[offset].addr) {
+			wrmsr(counters[offset].addr,
+			      counters[offset].multiplex.low,
+			      counters[offset].multiplex.high);
+		}
+	}
+}
+
+static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	unsigned int const nr_ctrs = model->num_counters;
 	unsigned int const nr_ctrls = model->num_controls;
@@ -264,7 +345,8 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
-	nmi_restore_registers(msrs);
+	nmi_cpu_restore_registers(msrs);
+	__get_cpu_var(switch_index) = 0;
 }
 
 static void nmi_shutdown(void)
@@ -327,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
 		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
 		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+		counter_config[i].save_count_low = 0;
 	}
 
 	return 0;
@@ -411,6 +494,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	__u8 vendor = boot_cpu_data.x86_vendor;
 	__u8 family = boot_cpu_data.x86;
 	char *cpu_type;
+	int ret = 0;
 
 	if (!cpu_has_apic)
 		return -ENODEV;
@@ -423,19 +507,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	default:
 		return -ENODEV;
 	case 6:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		cpu_type = "i386/athlon";
 		break;
 	case 0xf:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		/* Actually it could be i386/hammer too, but give
 		   user space an consistent name. */
 		cpu_type = "x86-64/hammer";
 		break;
 	case 0x10:
-		model = &op_athlon_spec;
+		model = &op_amd_spec;
 		cpu_type = "x86-64/family10";
 		break;
+	case 0x11:
+		model = &op_amd_spec;
+		cpu_type = "x86-64/family11h";
+		break;
 	}
 	break;
 
@@ -462,14 +550,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		return -ENODEV;
 	}
 
-	init_sysfs();
-	using_nmi = 1;
+	/* default values, can be overwritten by model */
+	__get_cpu_var(switch_index) = 0;
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
 	ops->shutdown = nmi_shutdown;
 	ops->start = nmi_start;
 	ops->stop = nmi_stop;
 	ops->cpu_type = cpu_type;
+	ops->switch_events = nmi_switch_event;
+
+	if (model->init)
+		ret = model->init(ops);
+	if (ret)
+		return ret;
+
+	init_sysfs();
+	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
 }
@@ -478,4 +575,6 @@ void op_nmi_exit(void)
 {
 	if (using_nmi)
 		exit_sysfs();
+	if (model->exit)
+		model->exit();
 }
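
[Editor's note] Since nmi_int.c now drives more virtual counters than the hardware has, the round-robin step in nmi_cpu_switch() is the heart of the multiplexing. Here is a stand-alone sketch of just that index arithmetic; the sizes and the sample configuration are made up for the illustration, and the bounds check is written with >= to stay in bounds where the kernel code above uses >:

/*
 * User-space sketch of the round-robin advance performed by
 * nmi_cpu_switch(): the per-CPU switch index moves through the virtual
 * counters in hardware-sized steps and wraps to 0 once it runs past
 * the configured sets.
 */
#include <stdio.h>

#define NUM_COUNTERS          32   /* virtual counters (OP_MAX_COUNTER) */
#define NUM_HARDWARE_COUNTERS  4   /* physical perfctrs per CPU */

static unsigned long count[NUM_COUNTERS];  /* stands in for counter_config[i].count */

static int next_switch_index(int si)
{
        si += NUM_HARDWARE_COUNTERS;
        /* >= keeps the probe in bounds; an unused set means wrap to set 0 */
        if (si >= NUM_COUNTERS || count[si] == 0)
                return 0;
        return si;
}

int main(void)
{
        int i, si = 0, round;

        /* pretend the user configured 10 events: sets 0-3, 4-7 and 8-11 */
        for (i = 0; i < 10; i++)
                count[i] = 100000;

        for (round = 0; round < 6; round++) {
                printf("round %d: virtual counters %d-%d on the hardware\n",
                       round, si, si + NUM_HARDWARE_COUNTERS - 1);
                si = next_switch_index(si);
        }
        return 0;
}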
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 2880b15c4675..786d6e01cf7f 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -10,13 +10,14 @@
 #ifndef OP_COUNTER_H
 #define OP_COUNTER_H
 
-#define OP_MAX_COUNTER 8
+#define OP_MAX_COUNTER 32
 
 /* Per-perfctr configuration as set via
  * oprofilefs.
  */
 struct op_counter_config {
 	unsigned long count;
+	unsigned long save_count_low;
 	unsigned long enabled;
 	unsigned long event;
 	unsigned long kernel;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
new file mode 100644
index 000000000000..bbf2b68bcc5d
--- /dev/null
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -0,0 +1,559 @@
+/*
+ * @file op_model_amd.c
+ * athlon / K7 / K8 / Family 10h model-specific MSR operations
+ *
+ * @remark Copyright 2002-2008 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ * @author Philippe Elie
+ * @author Graydon Hoare
+ * @author Robert Richter <robert.richter@amd.com>
+ * @author Barry Kasindorf
+ */
+
+#include <linux/oprofile.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/percpu.h>
+
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+#include <asm/nmi.h>
+
+#include "op_x86_model.h"
+#include "op_counter.h"
+
+#define NUM_COUNTERS 32
+#define NUM_HARDWARE_COUNTERS 4
+#define NUM_CONTROLS 32
+#define NUM_HARDWARE_CONTROLS 4
+
+#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
+#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+
+#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
+#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
+#define CTRL_CLEAR_LO(x) (x &= (1<<21))
+#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
+#define CTRL_SET_ENABLE(val) (val |= 1<<20)
+#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
+#define CTRL_SET_UM(val, m) (val |= (m << 8))
+#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
+#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
+
+static unsigned long reset_value[NUM_COUNTERS];
+DECLARE_PER_CPU(int, switch_index);
+
+#ifdef CONFIG_OPROFILE_IBS
+
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17)	/* bit 49 */
+#define IBS_FETCH_HIGH_ENABLE (1UL << 16)	/* bit 48 */
+#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL	/* MaxCnt mask */
+
+/* IbsOpCtl bits */
+#define IBS_OP_LOW_VALID_BIT (1ULL<<18)	/* bit 18 */
+#define IBS_OP_LOW_ENABLE (1ULL<<17)	/* bit 17 */
+
+/* Codes used in cpu_buffer.c */
+/* This produces duplicate code, need to be fixed */
+#define IBS_FETCH_BEGIN 3
+#define IBS_OP_BEGIN 4
+
+/* The function interface needs to be fixed, something like add
+   data. Should then be added to linux/oprofile.h. */
+extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
+				    unsigned int * const ibs_sample, u8 code);
+
+struct ibs_fetch_sample {
+	/* MSRC001_1031 IBS Fetch Linear Address Register */
+	unsigned int ibs_fetch_lin_addr_low;
+	unsigned int ibs_fetch_lin_addr_high;
+	/* MSRC001_1030 IBS Fetch Control Register */
+	unsigned int ibs_fetch_ctl_low;
+	unsigned int ibs_fetch_ctl_high;
+	/* MSRC001_1032 IBS Fetch Physical Address Register */
+	unsigned int ibs_fetch_phys_addr_low;
+	unsigned int ibs_fetch_phys_addr_high;
+};
+
+struct ibs_op_sample {
+	/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
+	unsigned int ibs_op_rip_low;
+	unsigned int ibs_op_rip_high;
+	/* MSRC001_1035 IBS Op Data Register */
+	unsigned int ibs_op_data1_low;
+	unsigned int ibs_op_data1_high;
+	/* MSRC001_1036 IBS Op Data 2 Register */
+	unsigned int ibs_op_data2_low;
+	unsigned int ibs_op_data2_high;
+	/* MSRC001_1037 IBS Op Data 3 Register */
+	unsigned int ibs_op_data3_low;
+	unsigned int ibs_op_data3_high;
+	/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
+	unsigned int ibs_dc_linear_low;
+	unsigned int ibs_dc_linear_high;
+	/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
+	unsigned int ibs_dc_phys_low;
+	unsigned int ibs_dc_phys_high;
+};
+
+/*
+ * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h+
+ */
+static void clear_ibs_nmi(void);
+
+static int ibs_allowed;	/* AMD Family10h and later */
+
+struct op_ibs_config {
+	unsigned long op_enabled;
+	unsigned long fetch_enabled;
+	unsigned long max_cnt_fetch;
+	unsigned long max_cnt_op;
+	unsigned long rand_en;
+	unsigned long dispatched_ops;
+};
+
+static struct op_ibs_config ibs_config;
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; i++) {
+		int hw_counter = i % NUM_HARDWARE_COUNTERS;
+		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter))
+			msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter;
+		else
+			msrs->counters[i].addr = 0;
+	}
+
+	for (i = 0; i < NUM_CONTROLS; i++) {
+		int hw_control = i % NUM_HARDWARE_CONTROLS;
+		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control))
+			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control;
+		else
+			msrs->controls[i].addr = 0;
+	}
+}
+
+
+static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (counter_config[offset].enabled)
+			reset_value[offset] = counter_config[offset].count;
+		else
+			reset_value[offset] = 0;
+	}
+
+	/* clear all counters */
+	for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) {
+		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+			continue;
+		CTRL_READ(low, high, msrs, i);
+		CTRL_CLEAR_LO(low);
+		CTRL_CLEAR_HI(high);
+		CTRL_WRITE(low, high, msrs, i);
+	}
+
+	/* avoid a false detection of ctr overflows in NMI handler */
+	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
+		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+			continue;
+		CTR_WRITE(1, msrs, i);
+	}
+
+	/* enable active counters */
+	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+			CTR_WRITE(counter_config[offset].count, msrs, i);
+
+			CTRL_READ(low, high, msrs, i);
+			CTRL_CLEAR_LO(low);
+			CTRL_CLEAR_HI(high);
+			CTRL_SET_ENABLE(low);
+			CTRL_SET_USR(low, counter_config[offset].user);
+			CTRL_SET_KERN(low, counter_config[offset].kernel);
+			CTRL_SET_UM(low, counter_config[offset].unit_mask);
+			CTRL_SET_EVENT_LOW(low, counter_config[offset].event);
+			CTRL_SET_EVENT_HIGH(high, counter_config[offset].event);
+			CTRL_SET_HOST_ONLY(high, 0);
+			CTRL_SET_GUEST_ONLY(high, 0);
+
+			CTRL_WRITE(low, high, msrs, i);
+		}
+	}
+}
+
+#ifdef CONFIG_OPROFILE_IBS
+
+static inline int
+op_amd_handle_ibs(struct pt_regs * const regs,
+		  struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	struct ibs_fetch_sample ibs_fetch;
+	struct ibs_op_sample ibs_op;
+
+	if (!ibs_allowed)
+		return 1;
+
+	if (ibs_config.fetch_enabled) {
+		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+		if (high & IBS_FETCH_HIGH_VALID_BIT) {
+			ibs_fetch.ibs_fetch_ctl_high = high;
+			ibs_fetch.ibs_fetch_ctl_low = low;
+			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
+			ibs_fetch.ibs_fetch_lin_addr_high = high;
+			ibs_fetch.ibs_fetch_lin_addr_low = low;
+			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
+			ibs_fetch.ibs_fetch_phys_addr_high = high;
+			ibs_fetch.ibs_fetch_phys_addr_low = low;
+
+			oprofile_add_ibs_sample(regs,
+						(unsigned int *)&ibs_fetch,
+						IBS_FETCH_BEGIN);
+
+			/* reenable the IRQ */
+			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+			high &= ~IBS_FETCH_HIGH_VALID_BIT;
+			high |= IBS_FETCH_HIGH_ENABLE;
+			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
+			wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+		}
+	}
+
+	if (ibs_config.op_enabled) {
+		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
+		if (low & IBS_OP_LOW_VALID_BIT) {
+			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
+			ibs_op.ibs_op_rip_low = low;
+			ibs_op.ibs_op_rip_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
+			ibs_op.ibs_op_data1_low = low;
+			ibs_op.ibs_op_data1_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
+			ibs_op.ibs_op_data2_low = low;
+			ibs_op.ibs_op_data2_high = high;
+			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
+			ibs_op.ibs_op_data3_low = low;
+			ibs_op.ibs_op_data3_high = high;
+			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
+			ibs_op.ibs_dc_linear_low = low;
+			ibs_op.ibs_dc_linear_high = high;
+			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
+			ibs_op.ibs_dc_phys_low = low;
+			ibs_op.ibs_dc_phys_high = high;
+
+			/* reenable the IRQ */
+			oprofile_add_ibs_sample(regs,
+						(unsigned int *)&ibs_op,
+						IBS_OP_BEGIN);
+			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
+			high = 0;
+			low &= ~IBS_OP_LOW_VALID_BIT;
+			low |= IBS_OP_LOW_ENABLE;
+			wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+		}
+	}
+
+	return 1;
+}
+
+#endif
+
+static int op_amd_check_ctrs(struct pt_regs * const regs,
+			     struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (!reset_value[offset])
+			continue;
+		CTR_READ(low, high, msrs, i);
+		if (CTR_OVERFLOWED(low)) {
+			oprofile_add_sample(regs, offset);
+			CTR_WRITE(reset_value[offset], msrs, i);
+		}
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	op_amd_handle_ibs(regs, msrs);
+#endif
+
+	/* See op_model_ppro.c */
+	return 1;
+}
+
+static void op_amd_start(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (reset_value[offset]) {
+			CTRL_READ(low, high, msrs, i);
+			CTRL_SET_ACTIVE(low);
+			CTRL_WRITE(low, high, msrs, i);
+		}
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	if (ibs_allowed && ibs_config.fetch_enabled) {
+		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+		high = IBS_FETCH_HIGH_ENABLE;
+		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+	}
+
+	if (ibs_allowed && ibs_config.op_enabled) {
+		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
+		high = 0;
+		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+	}
+#endif
+}
+
+
+static void op_amd_stop(struct op_msrs const * const msrs)
+{
+	unsigned int low, high;
+	int i;
+
+	/* Subtle: stop on all counters to avoid race with
+	 * setting our pm callback */
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
+			continue;
+		CTRL_READ(low, high, msrs, i);
+		CTRL_SET_INACTIVE(low);
+		CTRL_WRITE(low, high, msrs, i);
+	}
+
+#ifdef CONFIG_OPROFILE_IBS
+	if (ibs_allowed && ibs_config.fetch_enabled) {
+		low = 0;	/* clear max count and enable */
+		high = 0;
+		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+	}
+
+	if (ibs_allowed && ibs_config.op_enabled) {
+		low = 0;	/* clear max count and enable */
+		high = 0;
+		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+	}
+#endif
+}
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
+
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		if (CTR_IS_RESERVED(msrs, i))
+			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+	}
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		if (CTRL_IS_RESERVED(msrs, i))
+			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+}
+
+#ifndef CONFIG_OPROFILE_IBS
+
+/* no IBS support */
+
+static int op_amd_init(struct oprofile_operations *ops)
+{
+	return 0;
+}
+
+static void op_amd_exit(void) {}
+
+#else
+
+static u8 ibs_eilvt_off;
+
+static inline void apic_init_ibs_nmi_per_cpu(void *arg)
+{
+	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
+{
+	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+}
+
+static int pfm_amd64_setup_eilvt(void)
+{
+#define IBSCTL_LVTOFFSETVAL		(1 << 8)
+#define IBSCTL				0x1cc
+	struct pci_dev *cpu_cfg;
+	int nodes;
+	u32 value = 0;
+
+	/* per CPU setup */
+	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+
+	nodes = 0;
+	cpu_cfg = NULL;
+	do {
+		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
+					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
+					 cpu_cfg);
+		if (!cpu_cfg)
+			break;
+		++nodes;
+		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
+				       | IBSCTL_LVTOFFSETVAL);
+		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
+		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
+			       "IBSCTL = 0x%08x", value);
+			return 1;
+		}
+	} while (1);
+
+	if (!nodes) {
+		printk(KERN_DEBUG "No CPU node configured for IBS");
+		return 1;
+	}
+
+#ifdef CONFIG_NUMA
+	/* Sanity check */
+	/* Works only for 64bit with proper numa implementation. */
+	if (nodes != num_possible_nodes()) {
+		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
+		       "found: %d, expected %d",
+		       nodes, num_possible_nodes());
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * initialize the APIC for the IBS interrupts
+ * if available (AMD Family10h rev B0 and later)
+ */
+static void setup_ibs(void)
+{
+	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+
+	if (!ibs_allowed)
+		return;
+
+	if (pfm_amd64_setup_eilvt()) {
+		ibs_allowed = 0;
+		return;
+	}
+
+	printk(KERN_INFO "oprofile: AMD IBS detected\n");
+}
+
+
+/*
+ * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h
+ * rev B0 and later */
+static void clear_ibs_nmi(void)
+{
+	if (ibs_allowed)
+		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
+}
+
+static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
+
+static int setup_ibs_files(struct super_block *sb, struct dentry *root)
+{
+	char buf[12];
+	struct dentry *dir;
+	int ret = 0;
+
+	/* architecture specific files */
+	if (create_arch_files)
+		ret = create_arch_files(sb, root);
+
+	if (ret)
+		return ret;
+
+	if (!ibs_allowed)
+		return ret;
+
+	/* model specific files */
+
+	/* setup some reasonable defaults */
+	ibs_config.max_cnt_fetch = 250000;
+	ibs_config.fetch_enabled = 0;
+	ibs_config.max_cnt_op = 250000;
+	ibs_config.op_enabled = 0;
+	ibs_config.dispatched_ops = 1;
+	snprintf(buf, sizeof(buf), "ibs_fetch");
+	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "rand_enable",
+				&ibs_config.rand_en);
+	oprofilefs_create_ulong(sb, dir, "enable",
+				&ibs_config.fetch_enabled);
+	oprofilefs_create_ulong(sb, dir, "max_count",
+				&ibs_config.max_cnt_fetch);
+	snprintf(buf, sizeof(buf), "ibs_uops");
+	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "enable",
+				&ibs_config.op_enabled);
+	oprofilefs_create_ulong(sb, dir, "max_count",
+				&ibs_config.max_cnt_op);
+	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+				&ibs_config.dispatched_ops);
+
+	return 0;
+}
+
+static int op_amd_init(struct oprofile_operations *ops)
+{
+	setup_ibs();
+	create_arch_files = ops->create_files;
+	ops->create_files = setup_ibs_files;
+	return 0;
+}
+
+static void op_amd_exit(void)
+{
+	clear_ibs_nmi();
+}
+
+#endif
+
+struct op_x86_model_spec const op_amd_spec = {
+	.init = op_amd_init,
+	.exit = op_amd_exit,
+	.num_counters = NUM_COUNTERS,
+	.num_controls = NUM_CONTROLS,
+	.num_hardware_counters = NUM_HARDWARE_COUNTERS,
+	.num_hardware_controls = NUM_HARDWARE_CONTROLS,
+	.fill_in_addresses = &op_amd_fill_in_addresses,
+	.setup_ctrs = &op_amd_setup_ctrs,
+	.check_ctrs = &op_amd_check_ctrs,
+	.start = &op_amd_start,
+	.stop = &op_amd_stop,
+	.shutdown = &op_amd_shutdown
+};
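
[Editor's note] A note on the counter macros in this file: CTR_WRITE programs -(unsigned int)count into the perfctr and CTR_OVERFLOWED tests for bit 31 of the low word being clear. The perfctrs count upward, so preloading the two's complement of the reset value makes the counter cross zero after `count` events, which is what the NMI handler detects. A minimal user-space model of that arithmetic:

/*
 * Demonstrates the two's-complement preload used by CTR_WRITE and the
 * overflow test used by CTR_OVERFLOWED. Only the low 32 bits of the
 * counter are modeled here.
 */
#include <stdio.h>

#define CTR_OVERFLOWED(n) (!((n) & (1U << 31)))

int main(void)
{
        unsigned long count = 100000;            /* events between samples */
        unsigned int low = -(unsigned int)count; /* what CTR_WRITE programs */

        printf("preload: 0x%08x overflowed=%d\n", low, CTR_OVERFLOWED(low));

        low += count - 1;   /* one event short of the target: no NMI yet */
        printf("after %lu events: overflowed=%d\n", count - 1, CTR_OVERFLOWED(low));

        low += 1;           /* the count-th event wraps past zero */
        printf("after %lu events: overflowed=%d\n", count, CTR_OVERFLOWED(low));
        return 0;
}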
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
deleted file mode 100644
index 3d534879a9dc..000000000000
--- a/arch/x86/oprofile/op_model_athlon.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * @file op_model_athlon.h
- * athlon / K7 / K8 / Family 10h model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- */
-
-#include <linux/oprofile.h>
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/nmi.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
-
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR_LO(x) (x &= (1<<21))
-#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
-#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
-#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
-
-static unsigned long reset_value[NUM_COUNTERS];
-
-static void athlon_fill_in_addresses(struct op_msrs * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; i++) {
-		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-		else
-			msrs->counters[i].addr = 0;
-	}
-
-	for (i = 0; i < NUM_CONTROLS; i++) {
-		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-		else
-			msrs->controls[i].addr = 0;
-	}
-}
-
-
-static void athlon_setup_ctrs(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
-		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
-			continue;
-		CTRL_READ(low, high, msrs, i);
-		CTRL_CLEAR_LO(low);
-		CTRL_CLEAR_HI(high);
-		CTRL_WRITE(low, high, msrs, i);
-	}
-
-	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
-			continue;
-		CTR_WRITE(1, msrs, i);
-	}
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
-			reset_value[i] = counter_config[i].count;
-
-			CTR_WRITE(counter_config[i].count, msrs, i);
-
-			CTRL_READ(low, high, msrs, i);
-			CTRL_CLEAR_LO(low);
-			CTRL_CLEAR_HI(high);
-			CTRL_SET_ENABLE(low);
-			CTRL_SET_USR(low, counter_config[i].user);
-			CTRL_SET_KERN(low, counter_config[i].kernel);
-			CTRL_SET_UM(low, counter_config[i].unit_mask);
-			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
-			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
-			CTRL_SET_HOST_ONLY(high, 0);
-			CTRL_SET_GUEST_ONLY(high, 0);
-
-			CTRL_WRITE(low, high, msrs, i);
-		} else {
-			reset_value[i] = 0;
-		}
-	}
-}
-
-
-static int athlon_check_ctrs(struct pt_regs * const regs,
-			     struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
-		if (!reset_value[i])
-			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
-			oprofile_add_sample(regs, i);
-			CTR_WRITE(reset_value[i], msrs, i);
-		}
-	}
-
-	/* See op_model_ppro.c */
-	return 1;
-}
-
-
-static void athlon_start(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (reset_value[i]) {
-			CTRL_READ(low, high, msrs, i);
-			CTRL_SET_ACTIVE(low);
-			CTRL_WRITE(low, high, msrs, i);
-		}
-	}
-}
-
-
-static void athlon_stop(struct op_msrs const * const msrs)
-{
-	unsigned int low, high;
-	int i;
-
-	/* Subtle: stop on all counters to avoid race with
-	 * setting our pm callback */
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (!reset_value[i])
-			continue;
-		CTRL_READ(low, high, msrs, i);
-		CTRL_SET_INACTIVE(low);
-		CTRL_WRITE(low, high, msrs, i);
-	}
-}
-
-static void athlon_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (CTR_IS_RESERVED(msrs, i))
-			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
-		if (CTRL_IS_RESERVED(msrs, i))
-			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-}
-
-struct op_x86_model_spec const op_athlon_spec = {
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
-	.fill_in_addresses = &athlon_fill_in_addresses,
-	.setup_ctrs = &athlon_setup_ctrs,
-	.check_ctrs = &athlon_check_ctrs,
-	.start = &athlon_start,
-	.stop = &athlon_stop,
-	.shutdown = &athlon_shutdown
-};
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 43ac5af338d8..cacba61ffbac 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -700,6 +700,8 @@ static void p4_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_p4_ht2_spec = {
 	.num_counters = NUM_COUNTERS_HT2,
 	.num_controls = NUM_CONTROLS_HT2,
+	.num_hardware_counters = NUM_COUNTERS_HT2,
+	.num_hardware_controls = NUM_CONTROLS_HT2,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
@@ -712,6 +714,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
 struct op_x86_model_spec const op_p4_spec = {
 	.num_counters = NUM_COUNTERS_NON_HT,
 	.num_controls = NUM_CONTROLS_NON_HT,
+	.num_hardware_counters = NUM_COUNTERS_NON_HT,
+	.num_hardware_controls = NUM_CONTROLS_NON_HT,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index eff431f6c57b..e5811aa480eb 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -183,6 +183,8 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_ppro_spec = {
 	.num_counters = NUM_COUNTERS,
 	.num_controls = NUM_CONTROLS,
+	.num_hardware_counters = NUM_COUNTERS,
+	.num_hardware_controls = NUM_CONTROLS,
 	.fill_in_addresses = &ppro_fill_in_addresses,
 	.setup_ctrs = &ppro_setup_ctrs,
 	.check_ctrs = &ppro_check_ctrs,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 45b605fa71d0..e07ba1076371 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -19,6 +19,7 @@ struct op_saved_msr {
 struct op_msr {
 	unsigned long addr;
 	struct op_saved_msr saved;
+	struct op_saved_msr multiplex;
 };
 
 struct op_msrs {
@@ -32,6 +33,10 @@ struct pt_regs;
  * various x86 CPU models' perfctr support.
  */
 struct op_x86_model_spec {
+	int (*init)(struct oprofile_operations *ops);
+	void (*exit)(void);
+	unsigned int const num_hardware_counters;
+	unsigned int const num_hardware_controls;
 	unsigned int const num_counters;
 	unsigned int const num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);
@@ -46,6 +51,6 @@ struct op_x86_model_spec {
 extern struct op_x86_model_spec const op_ppro_spec;
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
-extern struct op_x86_model_spec const op_athlon_spec;
+extern struct op_x86_model_spec const op_amd_spec;
 
 #endif /* OP_X86_MODEL_H */
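
[Editor's note] The new init/exit hooks in op_x86_model_spec let a model extend the oprofile_operations before profiling starts — the AMD model uses init to splice its IBS files into the filesystem setup, and op_nmi_exit() tears it down via exit. A toy model of the calling convention; the structs here are simplified stand-ins, not the kernel definitions:

/*
 * Simplified stand-in for the init/exit hook protocol: the driver
 * calls the model's optional init() before registering and the
 * optional exit() on teardown, mirroring op_nmi_init()/op_nmi_exit().
 */
#include <stdio.h>

struct oprofile_operations { const char *cpu_type; };

struct op_x86_model_spec {
        int  (*init)(struct oprofile_operations *ops);
        void (*exit)(void);
        unsigned int num_hardware_counters;
        unsigned int num_counters;
};

static int amd_init(struct oprofile_operations *ops)
{
        printf("model init for %s\n", ops->cpu_type);
        return 0;   /* nonzero would abort registration */
}

static void amd_exit(void) { printf("model exit\n"); }

static const struct op_x86_model_spec amd = {
        .init = amd_init, .exit = amd_exit,
        .num_hardware_counters = 4, .num_counters = 32,
};

int main(void)
{
        struct oprofile_operations ops = { .cpu_type = "x86-64/family10" };
        const struct op_x86_model_spec *model = &amd;

        if (model->init && model->init(&ops))  /* optional hook, as in op_nmi_init() */
                return 1;
        /* ... profiling would run here ... */
        if (model->exit)                       /* optional hook, as in op_nmi_exit() */
                model->exit();
        return 0;
}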