Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig                        12
-rw-r--r--  arch/x86/oprofile/nmi_int.c        162
-rw-r--r--  arch/x86/oprofile/op_counter.h       2
-rw-r--r--  arch/x86/oprofile/op_model_amd.c   110
-rw-r--r--  arch/x86/oprofile/op_model_p4.c      4
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c    2
-rw-r--r--  arch/x86/oprofile/op_x86_model.h     7
7 files changed, 279 insertions(+), 20 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
 
 	  If unsure, say N.
 
+config OPROFILE_EVENT_MULTIPLEX
+	bool "OProfile multiplexing support (EXPERIMENTAL)"
+	default n
+	depends on OPROFILE && X86
+	help
+	  The number of hardware counters is limited. The multiplexing
+	  feature enables OProfile to gather more events than counters
+	  are provided by the hardware. This is realized by switching
+	  between events at a user-specified time interval.
+
+	  If unsure, say N.
+
 config HAVE_OPROFILE
 	bool
 
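
The Kconfig help above is the whole idea in prose: time-share a few physical counters across a larger set of configured events. As a minimal standalone sketch (plain userspace C, not part of the patch; the constants mirror the AMD model's NUM_COUNTERS = 4 and NUM_VIRT_COUNTERS = 32 introduced further down), the rotation amounts to sliding a window of hardware counters over the virtual counter array:

#include <stdio.h>

#define NUM_HW_COUNTERS    4   /* physical counters on the CPU */
#define NUM_VIRT_COUNTERS 32   /* events configurable via oprofilefs */

/*
 * Sketch of the per-CPU index advance done by nmi_cpu_switch() below:
 * jump ahead by one full window of hardware counters, wrapping to 0
 * when the window would run past the configured events.
 */
static int next_switch_index(int si, const int *enabled)
{
	si += NUM_HW_COUNTERS;
	if (si > NUM_VIRT_COUNTERS || !enabled[si])
		return 0;
	return si;
}

int main(void)
{
	/* +1 so the si == NUM_VIRT_COUNTERS probe above stays in bounds */
	int enabled[NUM_VIRT_COUNTERS + 1] = { 0 };
	int si = 0;
	int i;

	for (i = 0; i < 10; i++)	/* 10 events, only 4 counters */
		enabled[i] = 1;

	for (i = 0; i < 6; i++) {	/* six switch ticks */
		printf("tick %d: hardware counters map to events %d..%d\n",
		       i, si, si + NUM_HW_COUNTERS - 1);
		si = next_switch_index(si, enabled);
	}
	return 0;
}

With 10 enabled events the windows cycle 0..3, 4..7, 8..11, then wrap, so every event gets counted a share of the time.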
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index fca8dc94531e..e54f6a0b35ac 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,11 +1,14 @@
 /**
  * @file nmi_int.c
  *
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Robert Richter <robert.richter@amd.com>
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Jason Yeh <jason.yeh@amd.com>
+ * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  */
 
 #include <linux/init.h>
@@ -24,6 +27,12 @@
 #include "op_counter.h"
 #include "op_x86_model.h"
 
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+DEFINE_PER_CPU(int, switch_index);
+#endif
+
+
 static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
@@ -31,6 +40,13 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+extern atomic_t multiplex_counter;
+#endif
+
+struct op_counter_config counter_config[OP_MAX_COUNTER];
+
 /* common functions */
 
 u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
@@ -95,6 +111,11 @@ static void free_msrs(void)
 		per_cpu(cpu_msrs, i).counters = NULL;
 		kfree(per_cpu(cpu_msrs, i).controls);
 		per_cpu(cpu_msrs, i).controls = NULL;
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		kfree(per_cpu(cpu_msrs, i).multiplex);
+		per_cpu(cpu_msrs, i).multiplex = NULL;
+#endif
 	}
 }
 
@@ -103,6 +124,9 @@ static int allocate_msrs(void)
 	int success = 1;
 	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	size_t multiplex_size = sizeof(struct op_msr) * model->num_virt_counters;
+#endif
 
 	int i;
 	for_each_possible_cpu(i) {
@@ -118,6 +142,14 @@ static int allocate_msrs(void)
 			success = 0;
 			break;
 		}
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		per_cpu(cpu_msrs, i).multiplex =
+			kmalloc(multiplex_size, GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).multiplex) {
+			success = 0;
+			break;
+		}
+#endif
 	}
 
 	if (!success)
@@ -126,6 +158,25 @@ static int allocate_msrs(void)
 	return success;
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void nmi_setup_cpu_mux(struct op_msrs const * const msrs)
+{
+	int i;
+	struct op_msr *multiplex = msrs->multiplex;
+
+	for (i = 0; i < model->num_virt_counters; ++i) {
+		if (counter_config[i].enabled) {
+			multiplex[i].saved = -(u64)counter_config[i].count;
+		} else {
+			multiplex[i].addr = 0;
+			multiplex[i].saved = 0;
+		}
+	}
+}
+
+#endif
+
 static void nmi_cpu_setup(void *dummy)
 {
 	int cpu = smp_processor_id();
@@ -133,6 +184,9 @@ static void nmi_cpu_setup(void *dummy)
 	nmi_cpu_save_registers(msrs);
 	spin_lock(&oprofilefs_lock);
 	model->setup_ctrs(model, msrs);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	nmi_setup_cpu_mux(msrs);
+#endif
 	spin_unlock(&oprofilefs_lock);
 	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
@@ -173,14 +227,52 @@ static int nmi_setup(void)
 			memcpy(per_cpu(cpu_msrs, cpu).controls,
 			       per_cpu(cpu_msrs, 0).controls,
 			       sizeof(struct op_msr) * model->num_controls);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+			memcpy(per_cpu(cpu_msrs, cpu).multiplex,
+			       per_cpu(cpu_msrs, 0).multiplex,
+			       sizeof(struct op_msr) * model->num_virt_counters);
+#endif
 		}
-
 	}
 	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
 	return 0;
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	struct op_msr *multiplex = msrs->multiplex;
+	unsigned int i;
+
+	for (i = 0; i < model->num_counters; ++i) {
+		int offset = i + si;
+		if (multiplex[offset].addr) {
+			rdmsrl(multiplex[offset].addr,
+			       multiplex[offset].saved);
+		}
+	}
+}
+
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+	unsigned int si = __get_cpu_var(switch_index);
+	struct op_msr *multiplex = msrs->multiplex;
+	unsigned int i;
+
+	for (i = 0; i < model->num_counters; ++i) {
+		int offset = i + si;
+		if (multiplex[offset].addr) {
+			wrmsrl(multiplex[offset].addr,
+			       multiplex[offset].saved);
+		}
+	}
+}
+
+#endif
+
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -214,6 +306,9 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_cpu_restore_registers(msrs);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	__get_cpu_var(switch_index) = 0;
+#endif
 }
 
 static void nmi_shutdown(void)
@@ -252,16 +347,15 @@ static void nmi_stop(void)
 	on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
-struct op_counter_config counter_config[OP_MAX_COUNTER];
-
 static int nmi_create_files(struct super_block *sb, struct dentry *root)
 {
 	unsigned int i;
 
-	for (i = 0; i < model->num_counters; ++i) {
+	for (i = 0; i < model->num_virt_counters; ++i) {
 		struct dentry *dir;
 		char buf[4];
 
+#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
 		/* quick little hack to _not_ expose a counter if it is not
 		 * available for use. This should protect userspace app.
 		 * NOTE: assumes 1:1 mapping here (that counters are organized
@@ -269,6 +363,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 		 */
 		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
 			continue;
+#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */
 
 		snprintf(buf, sizeof(buf), "%d", i);
 		dir = oprofilefs_mkdir(sb, root, buf);
@@ -283,6 +378,57 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 	return 0;
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void nmi_cpu_switch(void *dummy)
+{
+	int cpu = smp_processor_id();
+	int si = per_cpu(switch_index, cpu);
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
+	nmi_cpu_stop(NULL);
+	nmi_cpu_save_mpx_registers(msrs);
+
+	/* move to next set */
+	si += model->num_counters;
+	if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
+		per_cpu(switch_index, cpu) = 0;
+	else
+		per_cpu(switch_index, cpu) = si;
+
+	model->switch_ctrl(model, msrs);
+	nmi_cpu_restore_mpx_registers(msrs);
+
+	nmi_cpu_start(NULL);
+}
+
+
+/*
+ * Quick check to see if multiplexing is necessary.
+ * The check should be sufficient since counters are used
+ * in order.
+ */
+static int nmi_multiplex_on(void)
+{
+	return counter_config[model->num_counters].count ? 0 : -EINVAL;
+}
+
+static int nmi_switch_event(void)
+{
+	if (!model->switch_ctrl)
+		return -ENOSYS;		/* not implemented */
+	if (nmi_multiplex_on() < 0)
+		return -EINVAL;		/* not necessary */
+
+	on_each_cpu(nmi_cpu_switch, NULL, 1);
+
+	atomic_inc(&multiplex_counter);
+
+	return 0;
+}
+
+#endif
+
 #ifdef CONFIG_SMP
 static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 				 void *data)
@@ -516,12 +662,18 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	register_cpu_notifier(&oprofile_cpu_nb);
 #endif
 	/* default values, can be overwritten by model */
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	__raw_get_cpu_var(switch_index) = 0;
+#endif
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
 	ops->shutdown = nmi_shutdown;
 	ops->start = nmi_start;
 	ops->stop = nmi_stop;
 	ops->cpu_type = cpu_type;
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	ops->switch_events = nmi_switch_event;
+#endif
 
 	if (model->init)
 		ret = model->init(ops);
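
Worth spelling out why no event progress is lost across a switch: the counters count upward and raise an NMI on overflow, so nmi_setup_cpu_mux() parks every virtual counter at -(u64)count. nmi_cpu_save_mpx_registers() then snapshots the live MSR values into the outgoing window's multiplex[] slots, and nmi_cpu_restore_mpx_registers() reloads the incoming window's slots. A hedged userspace model of that round trip (plain arrays stand in for the MSRs; not kernel code):

#include <stdint.h>
#include <stdio.h>

#define NUM_HW    4
#define NUM_VIRT 32

static uint64_t hw_ctr[NUM_HW];   /* stands in for the perfctr MSRs   */
static uint64_t saved[NUM_VIRT];  /* stands in for multiplex[].saved  */

static void save_window(int si)   /* cf. nmi_cpu_save_mpx_registers() */
{
	for (int i = 0; i < NUM_HW; i++)
		saved[si + i] = hw_ctr[i];
}

static void restore_window(int si) /* cf. nmi_cpu_restore_mpx_registers() */
{
	for (int i = 0; i < NUM_HW; i++)
		hw_ctr[i] = saved[si + i];
}

int main(void)
{
	uint64_t count = 100000;	/* sample every 100000 events */

	/* park all events at -count, as nmi_setup_cpu_mux() does */
	for (int i = 0; i < 8; i++)
		saved[i] = -(uint64_t)count;

	restore_window(0);
	hw_ctr[0] += 1234;		/* pretend event 0 ticked 1234 times */
	save_window(0);			/* switch away from window 0...      */
	restore_window(4);		/* ...run the next window a while... */
	save_window(4);
	restore_window(0);		/* ...and window 0 resumes intact    */

	printf("event 0 still needs %llu ticks to overflow\n",
	       (unsigned long long)(0 - hw_ctr[0]));	/* prints 98766 */
	return 0;
}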
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 91b6a116165e..e28398df0df2 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -10,7 +10,7 @@
 #ifndef OP_COUNTER_H
 #define OP_COUNTER_H
 
-#define OP_MAX_COUNTER 8
+#define OP_MAX_COUNTER 32
 
 /* Per-perfctr configuration as set via
  * oprofilefs.
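
OP_MAX_COUNTER sizes the counter_config[] array defined in nmi_int.c, so it has to cover the 32 virtual counters the AMD model now advertises. A build-time guard along these lines (only a suggestion, not in the patch; shown with C11 _Static_assert where in-tree code would use BUILD_BUG_ON()) would keep the two constants from drifting apart:

/* Hypothetical invariant check, restated outside the kernel tree. */
#define OP_MAX_COUNTER    32	/* from op_counter.h   */
#define NUM_VIRT_COUNTERS 32	/* from op_model_amd.c */

_Static_assert(OP_MAX_COUNTER >= NUM_VIRT_COUNTERS,
	       "counter_config[] must hold every virtual counter");

int main(void) { return 0; }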
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index f676f8825a3f..fdbed3a0c877 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -9,12 +9,15 @@
  * @author Philippe Elie
  * @author Graydon Hoare
  * @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Jason Yeh <jason.yeh@amd.com>
+ * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  */
 
 #include <linux/oprofile.h>
 #include <linux/device.h>
 #include <linux/pci.h>
+#include <linux/percpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/msr.h>
@@ -25,12 +28,23 @@
 
 #define NUM_COUNTERS 4
 #define NUM_CONTROLS 4
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+#define NUM_VIRT_COUNTERS 32
+#define NUM_VIRT_CONTROLS 32
+#else
+#define NUM_VIRT_COUNTERS NUM_COUNTERS
+#define NUM_VIRT_CONTROLS NUM_CONTROLS
+#endif
+
 #define OP_EVENT_MASK 0x0FFF
 #define OP_CTR_OVERFLOW (1ULL<<31)
 
 #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
 
-static unsigned long reset_value[NUM_COUNTERS];
+static unsigned long reset_value[NUM_VIRT_COUNTERS];
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+DECLARE_PER_CPU(int, switch_index);
+#endif
 
 #ifdef CONFIG_OPROFILE_IBS
 
36 50
@@ -82,6 +96,16 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
82 else 96 else
83 msrs->controls[i].addr = 0; 97 msrs->controls[i].addr = 0;
84 } 98 }
99
100#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
101 for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
102 int hw_counter = i % NUM_CONTROLS;
103 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
104 msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
105 else
106 msrs->multiplex[i].addr = 0;
107 }
108#endif
85} 109}
86 110
87static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, 111static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -90,6 +114,15 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 	u64 val;
 	int i;
 
+	/* setup reset_value */
+	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+		if (counter_config[i].enabled) {
+			reset_value[i] = counter_config[i].count;
+		} else {
+			reset_value[i] = 0;
+		}
+	}
+
 	/* clear all counters */
 	for (i = 0; i < NUM_CONTROLS; ++i) {
 		if (unlikely(!msrs->controls[i].addr))
@@ -108,20 +141,49 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
 	/* enable active counters */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (counter_config[i].enabled && msrs->counters[i].addr) {
-			reset_value[i] = counter_config[i].count;
-			wrmsrl(msrs->counters[i].addr,
-			       -(u64)counter_config[i].count);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		int offset = i + __get_cpu_var(switch_index);
+#else
+		int offset = i;
+#endif
+		if (counter_config[offset].enabled && msrs->counters[i].addr) {
+			/* setup counter registers */
+			wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]);
+
+			/* setup control registers */
 			rdmsrl(msrs->controls[i].addr, val);
 			val &= model->reserved;
-			val |= op_x86_get_ctrl(model, &counter_config[i]);
+			val |= op_x86_get_ctrl(model, &counter_config[offset]);
+			wrmsrl(msrs->controls[i].addr, val);
+		}
+	}
+}
+
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_amd_switch_ctrl(struct op_x86_model_spec const *model,
+			       struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		int offset = i + __get_cpu_var(switch_index);
+		if (counter_config[offset].enabled) {
+			/* setup control registers */
+			rdmsrl(msrs->controls[i].addr, val);
+			val &= model->reserved;
+			val |= op_x86_get_ctrl(model, &counter_config[offset]);
 			wrmsrl(msrs->controls[i].addr, val);
-		} else {
-			reset_value[i] = 0;
 		}
 	}
 }
 
+#endif
+
+
 #ifdef CONFIG_OPROFILE_IBS
 
 static inline int
@@ -230,14 +292,19 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
 	int i;
 
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (!reset_value[i])
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		int offset = i + __get_cpu_var(switch_index);
+#else
+		int offset = i;
+#endif
+		if (!reset_value[offset])
 			continue;
 		rdmsrl(msrs->counters[i].addr, val);
 		/* bit is clear if overflowed: */
 		if (val & OP_CTR_OVERFLOW)
 			continue;
-		oprofile_add_sample(regs, i);
-		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[i]);
+		oprofile_add_sample(regs, offset);
+		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]);
 	}
 
 	op_amd_handle_ibs(regs, msrs);
@@ -250,8 +317,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
 {
 	u64 val;
 	int i;
+
 	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (reset_value[i]) {
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		int offset = i + __get_cpu_var(switch_index);
+#else
+		int offset = i;
+#endif
+		if (reset_value[offset]) {
 			rdmsrl(msrs->controls[i].addr, val);
 			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 			wrmsrl(msrs->controls[i].addr, val);
@@ -271,7 +344,11 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	 * pm callback
 	 */
 	for (i = 0; i < NUM_COUNTERS; ++i) {
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+		if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
+#else
 		if (!reset_value[i])
+#endif
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
 		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -289,7 +366,7 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
 		if (msrs->counters[i].addr)
 			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
 	}
-	for (i = 0; i < NUM_CONTROLS; ++i) {
+	for (i = 0; i < NUM_COUNTERS; ++i) {
 		if (msrs->controls[i].addr)
 			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
 	}
@@ -463,6 +540,8 @@ static void op_amd_exit(void) {}
 struct op_x86_model_spec const op_amd_spec = {
 	.num_counters = NUM_COUNTERS,
 	.num_controls = NUM_CONTROLS,
+	.num_virt_counters = NUM_VIRT_COUNTERS,
+	.num_virt_controls = NUM_VIRT_CONTROLS,
 	.reserved = MSR_AMD_EVENTSEL_RESERVED,
 	.event_mask = OP_EVENT_MASK,
 	.init = op_amd_init,
@@ -473,4 +552,7 @@ struct op_x86_model_spec const op_amd_spec = {
 	.start = &op_amd_start,
 	.stop = &op_amd_stop,
 	.shutdown = &op_amd_shutdown,
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	.switch_ctrl = &op_amd_switch_ctrl,
+#endif
 };
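
Two index mappings do the heavy lifting in this file: op_amd_fill_in_addresses() backs virtual counter i with physical MSR `MSR_K7_PERFCTR0 + (i % NUM_CONTROLS)`, and the setup/start/stop/check paths read the active event config at `i + switch_index`. Because switch_index only ever advances in whole windows of NUM_COUNTERS, the two mappings agree: the event in hardware slot i always folds back to slot i. A small demonstration (userspace C; 0xc0010004 is the conventional MSR_K7_PERFCTR0 address):

#include <stdio.h>

#define NUM_COUNTERS       4
#define NUM_VIRT_COUNTERS 32
#define MSR_K7_PERFCTR0   0xc0010004

int main(void)
{
	int switch_index;

	/* switch_index moves in whole windows: 0, 4, 8, ... */
	for (switch_index = 0; switch_index < NUM_VIRT_COUNTERS;
	     switch_index += NUM_COUNTERS) {
		int i;
		for (i = 0; i < NUM_COUNTERS; i++) {
			int virt = i + switch_index;	/* event config index */
			int hw = virt % NUM_COUNTERS;	/* backing MSR slot   */
			if (hw != i)	/* never true: windows stay aligned */
				return 1;
			printf("window %2d: slot %d <- event %2d (MSR %#x)\n",
			       switch_index, i, virt, MSR_K7_PERFCTR0 + hw);
		}
	}
	return 0;
}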
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 5921b7fc724b..65b9237cde8b 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -698,6 +698,8 @@ static void p4_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_p4_ht2_spec = {
 	.num_counters = NUM_COUNTERS_HT2,
 	.num_controls = NUM_CONTROLS_HT2,
+	.num_virt_counters = NUM_COUNTERS_HT2,
+	.num_virt_controls = NUM_CONTROLS_HT2,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
@@ -710,6 +712,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
 struct op_x86_model_spec const op_p4_spec = {
 	.num_counters = NUM_COUNTERS_NON_HT,
 	.num_controls = NUM_CONTROLS_NON_HT,
+	.num_virt_counters = NUM_COUNTERS_NON_HT,
+	.num_virt_controls = NUM_CONTROLS_NON_HT,
 	.fill_in_addresses = &p4_fill_in_addresses,
 	.setup_ctrs = &p4_setup_ctrs,
 	.check_ctrs = &p4_check_ctrs,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 570d717c3308..098cbca5c0b0 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -206,6 +206,8 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_ppro_spec = {
 	.num_counters = 2,
 	.num_controls = 2,
+	.num_virt_counters = 2,
+	.num_virt_controls = 2,
 	.reserved = MSR_PPRO_EVENTSEL_RESERVED,
 	.fill_in_addresses = &ppro_fill_in_addresses,
 	.setup_ctrs = &ppro_setup_ctrs,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 505489873b9d..0d07d23cb062 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -23,6 +23,7 @@ struct op_msr {
 struct op_msrs {
 	struct op_msr *counters;
 	struct op_msr *controls;
+	struct op_msr *multiplex;
 };
 
 struct pt_regs;
@@ -35,6 +36,8 @@ struct oprofile_operations;
 struct op_x86_model_spec {
 	unsigned int num_counters;
 	unsigned int num_controls;
+	unsigned int num_virt_counters;
+	unsigned int num_virt_controls;
 	u64 reserved;
 	u16 event_mask;
 	int (*init)(struct oprofile_operations *ops);
@@ -47,6 +50,10 @@ struct op_x86_model_spec {
 	void (*start)(struct op_msrs const * const msrs);
 	void (*stop)(struct op_msrs const * const msrs);
 	void (*shutdown)(struct op_msrs const * const msrs);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+	void (*switch_ctrl)(struct op_x86_model_spec const *model,
+			    struct op_msrs const * const msrs);
+#endif
 };
 
 struct op_counter_config;
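
The switch_ctrl hook is what keeps multiplexing opt-in per model: only op_amd_spec fills it in, and nmi_switch_event() bails out with -ENOSYS when it is NULL (as it is for the p4 and ppro specs). A stripped-down mock of that dispatch (standalone C with the structs abbreviated; illustrative only, not the kernel's types):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct op_msrs;		/* left opaque here */

struct op_x86_model_spec {
	unsigned int num_counters;
	unsigned int num_virt_counters;
	void (*switch_ctrl)(const struct op_x86_model_spec *model,
			    const struct op_msrs *msrs);
};

static void amd_switch_ctrl(const struct op_x86_model_spec *model,
			    const struct op_msrs *msrs)
{
	(void)model; (void)msrs;
	puts("reprogramming event selects for the new window");
}

/* mirrors nmi_switch_event(): models without the hook opt out */
static int switch_event(const struct op_x86_model_spec *model,
			const struct op_msrs *msrs)
{
	if (!model->switch_ctrl)
		return -ENOSYS;
	model->switch_ctrl(model, msrs);
	return 0;
}

int main(void)
{
	struct op_x86_model_spec amd = { 4, 32, amd_switch_ctrl };
	struct op_x86_model_spec p4  = { 8,  8, NULL };

	printf("amd: %d\n", switch_event(&amd, NULL));	/* 0       */
	printf("p4:  %d\n", switch_event(&p4, NULL));	/* -ENOSYS */
	return 0;
}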