author		Jason Yeh <jason.yeh@amd.com>	2008-07-23 17:05:53 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-26 05:48:16 -0400
commit		1a960b402a51d80abf54e3f8e4972374ffe5f22d
tree		108222afe94df145e7a71f44bb077067c35f0131 /arch/x86/oprofile
parent		6852fd9b86d05063c6ef49d2e12e061cc7f6a105
Oprofile Multiplexing Patch
This patch introduces multiplexing support for the OProfile kernel module. It adds a new function pointer in struct oprofile_operations, allowing each architecture to supply its callback to switch between different sets of events when the timer expires. Userspace tools can modify the time slice through /dev/oprofile/time_slice. The patch also changes the number of counters exposed to userspace through /dev/oprofile; for example, AMD CPUs now expose 32 counters, multiplexed in sets of 4.

Signed-off-by: Jason Yeh <jason.yeh@amd.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
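As a quick sketch of the new userspace knob described above: the /dev/oprofile/time_slice path comes from this commit message, but the accepted value format and its unit are assumptions here. A minimal C tool to change the multiplexing interval might look like this:

/*
 * Hypothetical sketch: set the OProfile event-multiplexing time slice.
 * The path is taken from the commit message; writing a plain decimal
 * value is an assumption, not a documented interface.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/dev/oprofile/time_slice", "w");

        if (!f) {
                perror("fopen /dev/oprofile/time_slice");
                return 1;
        }
        fprintf(f, "%d\n", 1);  /* assumed format; unit unspecified */
        fclose(f);
        return 0;
}

In practice OProfile's own userspace tools would manage this file; the sketch only shows where the new knob lives.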
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--	arch/x86/oprofile/nmi_int.c		100
-rw-r--r--	arch/x86/oprofile/op_counter.h		  3
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	 76
-rw-r--r--	arch/x86/oprofile/op_model_p4.c		  4
-rw-r--r--	arch/x86/oprofile/op_model_ppro.c	  2
-rw-r--r--	arch/x86/oprofile/op_x86_model.h	  3
6 files changed, 149 insertions(+), 39 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 287513a09819..2a65fe7680ab 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -23,12 +23,18 @@
23#include "op_counter.h" 23#include "op_counter.h"
24#include "op_x86_model.h" 24#include "op_x86_model.h"
25 25
26DEFINE_PER_CPU(int, switch_index);
27
26static struct op_x86_model_spec const *model; 28static struct op_x86_model_spec const *model;
27static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); 29static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
28static DEFINE_PER_CPU(unsigned long, saved_lvtpc); 30static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
29 31
30static int nmi_start(void); 32static int nmi_start(void);
31static void nmi_stop(void); 33static void nmi_stop(void);
34static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
35static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);
36static void nmi_cpu_stop(void *dummy);
37static void nmi_cpu_start(void *dummy);
32 38
33/* 0 == registered but off, 1 == registered and on */ 39/* 0 == registered but off, 1 == registered and on */
34static int nmi_enabled = 0; 40static int nmi_enabled = 0;
@@ -81,6 +87,47 @@ static void exit_sysfs(void)
 #define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
+static void nmi_cpu_switch(void *dummy)
+{
+	int cpu = smp_processor_id();
+	int si = per_cpu(switch_index, cpu);
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
+	nmi_cpu_stop(NULL);
+	nmi_cpu_save_mpx_registers(msrs);
+
+	/* move to next set */
+	si += model->num_hardware_counters;
+	if ((si > model->num_counters) || (counter_config[si].count == 0))
+		per_cpu(switch_index, smp_processor_id()) = 0;
+	else
+		per_cpu(switch_index, smp_processor_id()) = si;
+
+	nmi_cpu_restore_mpx_registers(msrs);
+	model->setup_ctrs(msrs);
+	nmi_cpu_start(NULL);
+}
+
+/*
+ * Quick check to see if multiplexing is necessary.
+ * The check should be sufficient since counters are used
+ * in order.
+ */
+static int nmi_multiplex_on(void)
+{
+	return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
+}
+
+static int nmi_switch_event(void)
+{
+	if (nmi_multiplex_on() < 0)
+		return -EINVAL;
+
+	on_each_cpu(nmi_cpu_switch, NULL, 0, 1);
+
+	return 0;
+}
+
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -144,11 +191,10 @@ static void free_msrs(void)
 
 static int allocate_msrs(void)
 {
-	int success = 1;
+	int i, success = 1;
 	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
-	int i;
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
 							GFP_KERNEL);
@@ -156,8 +202,8 @@ static int allocate_msrs(void)
 			success = 0;
 			break;
 		}
-		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
-							GFP_KERNEL);
+		per_cpu(cpu_msrs, i).controls =
+				kmalloc(controls_size, GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).controls) {
 			success = 0;
 			break;
@@ -201,7 +247,8 @@ static int nmi_setup(void)
 		return err;
 	}
 
-	/* We need to serialize save and setup for HT because the subset
+	/*
+	 * We need to serialize save and setup for HT because the subset
 	 * of msrs are distinct for save and setup operations
 	 */
 
@@ -217,7 +264,6 @@ static int nmi_setup(void)
 				per_cpu(cpu_msrs, 0).controls,
 				sizeof(struct op_msr) * model->num_controls);
 		}
-
 	}
 	on_each_cpu(nmi_save_registers, NULL, 1);
 	on_each_cpu(nmi_cpu_setup, NULL, 1);
@@ -225,7 +271,41 @@ static int nmi_setup(void)
 	return 0;
 }
 
-static void nmi_restore_registers(struct op_msrs *msrs)
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	unsigned int const nr_ctrs = model->num_hardware_counters;
+	struct op_msr *counters = &msrs->counters[si];
+	unsigned int i;
+
+	for (i = 0; i < nr_ctrs; ++i) {
+		int offset = i + si;
+		if (counters[offset].addr) {
+			rdmsr(counters[offset].addr,
+			      counters[offset].multiplex.low,
+			      counters[offset].multiplex.high);
+		}
+	}
+}
+
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	unsigned int const nr_ctrs = model->num_hardware_counters;
+	struct op_msr *counters = &msrs->counters[si];
+	unsigned int i;
+
+	for (i = 0; i < nr_ctrs; ++i) {
+		int offset = i + si;
+		if (counters[offset].addr) {
+			wrmsr(counters[offset].addr,
+			      counters[offset].multiplex.low,
+			      counters[offset].multiplex.high);
+		}
+	}
+}
+
+static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	unsigned int const nr_ctrs = model->num_counters;
 	unsigned int const nr_ctrls = model->num_controls;
@@ -265,7 +345,8 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
-	nmi_restore_registers(msrs);
+	nmi_cpu_restore_registers(msrs);
+	__get_cpu_var(switch_index) = 0;
 }
 
 static void nmi_shutdown(void)
@@ -328,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
 		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
 		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+		counter_config[i].save_count_low = 0;
 	}
 
 	return 0;
@@ -469,12 +551,14 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	}
 
 	/* default values, can be overwritten by model */
+	__get_cpu_var(switch_index) = 0;
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
 	ops->shutdown = nmi_shutdown;
 	ops->start = nmi_start;
 	ops->stop = nmi_stop;
 	ops->cpu_type = cpu_type;
+	ops->switch_events = nmi_switch_event;
 
 	if (model->init)
 		ret = model->init(ops);
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 2880b15c4675..786d6e01cf7f 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -10,13 +10,14 @@
 #ifndef OP_COUNTER_H
 #define OP_COUNTER_H
 
-#define OP_MAX_COUNTER 8
+#define OP_MAX_COUNTER 32
 
 /* Per-perfctr configuration as set via
  * oprofilefs.
  */
 struct op_counter_config {
 	unsigned long count;
+	unsigned long save_count_low;
 	unsigned long enabled;
 	unsigned long event;
 	unsigned long kernel;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index d9faf607b3a6..bbf2b68bcc5d 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -15,6 +15,7 @@
 #include <linux/oprofile.h>
 #include <linux/device.h>
 #include <linux/pci.h>
+#include <linux/percpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/msr.h>
@@ -23,8 +24,10 @@
23#include "op_x86_model.h" 24#include "op_x86_model.h"
24#include "op_counter.h" 25#include "op_counter.h"
25 26
26#define NUM_COUNTERS 4 27#define NUM_COUNTERS 32
27#define NUM_CONTROLS 4 28#define NUM_HARDWARE_COUNTERS 4
29#define NUM_CONTROLS 32
30#define NUM_HARDWARE_CONTROLS 4
28 31
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) 32#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) 33#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
@@ -48,6 +51,7 @@
 #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
 
 static unsigned long reset_value[NUM_COUNTERS];
+DECLARE_PER_CPU(int, switch_index);
 
 #ifdef CONFIG_OPROFILE_IBS
 
@@ -130,15 +134,17 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
 	int i;
 
 	for (i = 0; i < NUM_COUNTERS; i++) {
-		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+		int hw_counter = i % NUM_HARDWARE_COUNTERS;
+		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter))
+			msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
 	for (i = 0; i < NUM_CONTROLS; i++) {
-		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+		int hw_control = i % NUM_HARDWARE_CONTROLS;
+		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control))
+			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control;
 		else
 			msrs->controls[i].addr = 0;
 	}
@@ -150,8 +156,16 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (counter_config[offset].enabled)
+			reset_value[offset] = counter_config[offset].count;
+		else
+			reset_value[offset] = 0;
+	}
+
 	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
+	for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -161,34 +175,31 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
 		CTR_WRITE(1, msrs, i);
 	}
 
 	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
-			reset_value[i] = counter_config[i].count;
-
-			CTR_WRITE(counter_config[i].count, msrs, i);
+	for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+			CTR_WRITE(counter_config[offset].count, msrs, i);
 
 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR_LO(low);
 			CTRL_CLEAR_HI(high);
 			CTRL_SET_ENABLE(low);
-			CTRL_SET_USR(low, counter_config[i].user);
-			CTRL_SET_KERN(low, counter_config[i].kernel);
-			CTRL_SET_UM(low, counter_config[i].unit_mask);
-			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
-			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
+			CTRL_SET_USR(low, counter_config[offset].user);
+			CTRL_SET_KERN(low, counter_config[offset].kernel);
+			CTRL_SET_UM(low, counter_config[offset].unit_mask);
+			CTRL_SET_EVENT_LOW(low, counter_config[offset].event);
+			CTRL_SET_EVENT_HIGH(high, counter_config[offset].event);
 			CTRL_SET_HOST_ONLY(high, 0);
 			CTRL_SET_GUEST_ONLY(high, 0);
 
 			CTRL_WRITE(low, high, msrs, i);
-		} else {
-			reset_value[i] = 0;
 		}
 	}
 }
@@ -276,13 +287,14 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
 	unsigned int low, high;
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
-		if (!reset_value[i])
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (!reset_value[offset])
 			continue;
 		CTR_READ(low, high, msrs, i);
 		if (CTR_OVERFLOWED(low)) {
-			oprofile_add_sample(regs, i);
-			CTR_WRITE(reset_value[i], msrs, i);
+			oprofile_add_sample(regs, offset);
+			CTR_WRITE(reset_value[offset], msrs, i);
 		}
 	}
 
@@ -298,8 +310,10 @@ static void op_amd_start(struct op_msrs const * const msrs)
 {
 	unsigned int low, high;
 	int i;
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (reset_value[i]) {
+
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (reset_value[offset]) {
 			CTRL_READ(low, high, msrs, i);
 			CTRL_SET_ACTIVE(low);
 			CTRL_WRITE(low, high, msrs, i);
@@ -329,8 +343,8 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 
 	/* Subtle: stop on all counters to avoid race with
 	 * setting our pm callback */
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
-		if (!reset_value[i])
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
+		if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
 			continue;
 		CTRL_READ(low, high, msrs, i);
 		CTRL_SET_INACTIVE(low);
@@ -356,11 +370,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
 {
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
 		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
 	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
+	for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) {
 		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
 	}
@@ -534,6 +548,8 @@ struct op_x86_model_spec const op_amd_spec = {
 	.exit = op_amd_exit,
 	.num_counters = NUM_COUNTERS,
 	.num_controls = NUM_CONTROLS,
+	.num_hardware_counters = NUM_HARDWARE_COUNTERS,
+	.num_hardware_controls = NUM_HARDWARE_CONTROLS,
 	.fill_in_addresses = &op_amd_fill_in_addresses,
 	.setup_ctrs = &op_amd_setup_ctrs,
 	.check_ctrs = &op_amd_check_ctrs,
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 56b4757a1f47..e641545d4796 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -701,6 +701,8 @@ static void p4_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_p4_ht2_spec = {
 	.num_counters = NUM_COUNTERS_HT2,
 	.num_controls = NUM_CONTROLS_HT2,
+	.num_hardware_counters = NUM_COUNTERS_HT2,
+	.num_hardware_controls = NUM_CONTROLS_HT2,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
@@ -713,6 +715,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
 struct op_x86_model_spec const op_p4_spec = {
 	.num_counters = NUM_COUNTERS_NON_HT,
 	.num_controls = NUM_CONTROLS_NON_HT,
+	.num_hardware_counters = NUM_COUNTERS_NON_HT,
+	.num_hardware_controls = NUM_CONTROLS_NON_HT,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index eff431f6c57b..e5811aa480eb 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -183,6 +183,8 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_ppro_spec = {
 	.num_counters = NUM_COUNTERS,
 	.num_controls = NUM_CONTROLS,
+	.num_hardware_counters = NUM_COUNTERS,
+	.num_hardware_controls = NUM_CONTROLS,
 	.fill_in_addresses = &ppro_fill_in_addresses,
 	.setup_ctrs = &ppro_setup_ctrs,
 	.check_ctrs = &ppro_check_ctrs,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 05a0261ba0c3..e07ba1076371 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -19,6 +19,7 @@ struct op_saved_msr {
 struct op_msr {
 	unsigned long addr;
 	struct op_saved_msr saved;
+	struct op_saved_msr multiplex;
 };
 
 struct op_msrs {
@@ -34,6 +35,8 @@ struct pt_regs;
 struct op_x86_model_spec {
 	int (*init)(struct oprofile_operations *ops);
 	void (*exit)(void);
+	unsigned int const num_hardware_counters;
+	unsigned int const num_hardware_controls;
 	unsigned int const num_counters;
 	unsigned int const num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);