diff options
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r-- | arch/x86/oprofile/nmi_int.c | 100 |
1 files changed, 92 insertions, 8 deletions
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 287513a09819..2a65fe7680ab 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -23,12 +23,18 @@ | |||
23 | #include "op_counter.h" | 23 | #include "op_counter.h" |
24 | #include "op_x86_model.h" | 24 | #include "op_x86_model.h" |
25 | 25 | ||
26 | DEFINE_PER_CPU(int, switch_index); | ||
27 | |||
26 | static struct op_x86_model_spec const *model; | 28 | static struct op_x86_model_spec const *model; |
27 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 29 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
28 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 30 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
29 | 31 | ||
30 | static int nmi_start(void); | 32 | static int nmi_start(void); |
31 | static void nmi_stop(void); | 33 | static void nmi_stop(void); |
34 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); | ||
35 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); | ||
36 | static void nmi_cpu_stop(void *dummy); | ||
37 | static void nmi_cpu_start(void *dummy); | ||
32 | 38 | ||
33 | /* 0 == registered but off, 1 == registered and on */ | 39 | /* 0 == registered but off, 1 == registered and on */ |
34 | static int nmi_enabled = 0; | 40 | static int nmi_enabled = 0; |
@@ -81,6 +87,47 @@ static void exit_sysfs(void) | |||
81 | #define exit_sysfs() do { } while (0) | 87 | #define exit_sysfs() do { } while (0) |
82 | #endif /* CONFIG_PM */ | 88 | #endif /* CONFIG_PM */ |
83 | 89 | ||
90 | static void nmi_cpu_switch(void *dummy) | ||
91 | { | ||
92 | int cpu = smp_processor_id(); | ||
93 | int si = per_cpu(switch_index, cpu); | ||
94 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | ||
95 | |||
96 | nmi_cpu_stop(NULL); | ||
97 | nmi_cpu_save_mpx_registers(msrs); | ||
98 | |||
99 | /* move to next set */ | ||
100 | si += model->num_hardware_counters; | ||
101 | if ((si > model->num_counters) || (counter_config[si].count == 0)) | ||
102 | per_cpu(switch_index, smp_processor_id()) = 0; | ||
103 | else | ||
104 | per_cpu(switch_index, smp_processor_id()) = si; | ||
105 | |||
106 | nmi_cpu_restore_mpx_registers(msrs); | ||
107 | model->setup_ctrs(msrs); | ||
108 | nmi_cpu_start(NULL); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Quick check to see if multiplexing is necessary. | ||
113 | * The check should be sufficient since counters are used | ||
113 | * in order. | ||
115 | */ | ||
116 | static int nmi_multiplex_on(void) | ||
117 | { | ||
118 | return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL; | ||
119 | } | ||
120 | |||
121 | static int nmi_switch_event(void) | ||
122 | { | ||
123 | if (nmi_multiplex_on() < 0) | ||
124 | return -EINVAL; | ||
125 | |||
126 | on_each_cpu(nmi_cpu_switch, NULL, 0, 1); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
84 | static int profile_exceptions_notify(struct notifier_block *self, | 131 | static int profile_exceptions_notify(struct notifier_block *self, |
85 | unsigned long val, void *data) | 132 | unsigned long val, void *data) |
86 | { | 133 | { |
@@ -144,11 +191,10 @@ static void free_msrs(void) | |||
144 | 191 | ||
145 | static int allocate_msrs(void) | 192 | static int allocate_msrs(void) |
146 | { | 193 | { |
147 | int success = 1; | 194 | int i, success = 1; |
148 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; | 195 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; |
149 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 196 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
150 | 197 | ||
151 | int i; | ||
152 | for_each_possible_cpu(i) { | 198 | for_each_possible_cpu(i) { |
153 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, | 199 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, |
154 | GFP_KERNEL); | 200 | GFP_KERNEL); |
@@ -156,8 +202,8 @@ static int allocate_msrs(void) | |||
156 | success = 0; | 202 | success = 0; |
157 | break; | 203 | break; |
158 | } | 204 | } |
159 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, | 205 | per_cpu(cpu_msrs, i).controls = |
160 | GFP_KERNEL); | 206 | kmalloc(controls_size, GFP_KERNEL); |
161 | if (!per_cpu(cpu_msrs, i).controls) { | 207 | if (!per_cpu(cpu_msrs, i).controls) { |
162 | success = 0; | 208 | success = 0; |
163 | break; | 209 | break; |
@@ -201,7 +247,8 @@ static int nmi_setup(void) | |||
201 | return err; | 247 | return err; |
202 | } | 248 | } |
203 | 249 | ||
204 | /* We need to serialize save and setup for HT because the subset | 250 | /* |
251 | * We need to serialize save and setup for HT because the subset | ||
205 | * of msrs are distinct for save and setup operations | 252 | * of msrs are distinct for save and setup operations |
206 | */ | 253 | */ |
207 | 254 | ||
@@ -217,7 +264,6 @@ static int nmi_setup(void) | |||
217 | per_cpu(cpu_msrs, 0).controls, | 264 | per_cpu(cpu_msrs, 0).controls, |
218 | sizeof(struct op_msr) * model->num_controls); | 265 | sizeof(struct op_msr) * model->num_controls); |
219 | } | 266 | } |
220 | |||
221 | } | 267 | } |
222 | on_each_cpu(nmi_save_registers, NULL, 1); | 268 | on_each_cpu(nmi_save_registers, NULL, 1); |
223 | on_each_cpu(nmi_cpu_setup, NULL, 1); | 269 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
@@ -225,7 +271,41 @@ static int nmi_setup(void) | |||
225 | return 0; | 271 | return 0; |
226 | } | 272 | } |
227 | 273 | ||
228 | static void nmi_restore_registers(struct op_msrs *msrs) | 274 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) |
275 | { | ||
276 | unsigned int si = __get_cpu_var(switch_index); | ||
277 | unsigned int const nr_ctrs = model->num_hardware_counters; | ||
278 | struct op_msr *counters = &msrs->counters[si]; | ||
279 | unsigned int i; | ||
280 | |||
281 | for (i = 0; i < nr_ctrs; ++i) { | ||
282 | int offset = i + si; | ||
283 | if (counters[offset].addr) { | ||
284 | rdmsr(counters[offset].addr, | ||
285 | counters[offset].multiplex.low, | ||
286 | counters[offset].multiplex.high); | ||
287 | } | ||
288 | } | ||
289 | } | ||
290 | |||
291 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) | ||
292 | { | ||
293 | unsigned int si = __get_cpu_var(switch_index); | ||
294 | unsigned int const nr_ctrs = model->num_hardware_counters; | ||
295 | struct op_msr *counters = &msrs->counters[si]; | ||
296 | unsigned int i; | ||
297 | |||
298 | for (i = 0; i < nr_ctrs; ++i) { | ||
299 | int offset = i + si; | ||
300 | if (counters[offset].addr) { | ||
301 | wrmsr(counters[offset].addr, | ||
302 | counters[offset].multiplex.low, | ||
303 | counters[offset].multiplex.high); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | ||
229 | { | 309 | { |
230 | unsigned int const nr_ctrs = model->num_counters; | 310 | unsigned int const nr_ctrs = model->num_counters; |
231 | unsigned int const nr_ctrls = model->num_controls; | 311 | unsigned int const nr_ctrls = model->num_controls; |
@@ -265,7 +345,8 @@ static void nmi_cpu_shutdown(void *dummy) | |||
265 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | 345 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); |
266 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 346 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
267 | apic_write(APIC_LVTERR, v); | 347 | apic_write(APIC_LVTERR, v); |
268 | nmi_restore_registers(msrs); | 348 | nmi_cpu_restore_registers(msrs); |
349 | __get_cpu_var(switch_index) = 0; | ||
269 | } | 350 | } |
270 | 351 | ||
271 | static void nmi_shutdown(void) | 352 | static void nmi_shutdown(void) |
@@ -328,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
328 | oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); | 409 | oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); |
329 | oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); | 410 | oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); |
330 | oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); | 411 | oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); |
412 | counter_config[i].save_count_low = 0; | ||
331 | } | 413 | } |
332 | 414 | ||
333 | return 0; | 415 | return 0; |
@@ -469,12 +551,14 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
469 | } | 551 | } |
470 | 552 | ||
471 | /* default values, can be overwritten by model */ | 553 | /* default values, can be overwritten by model */ |
554 | __get_cpu_var(switch_index) = 0; | ||
472 | ops->create_files = nmi_create_files; | 555 | ops->create_files = nmi_create_files; |
473 | ops->setup = nmi_setup; | 556 | ops->setup = nmi_setup; |
474 | ops->shutdown = nmi_shutdown; | 557 | ops->shutdown = nmi_shutdown; |
475 | ops->start = nmi_start; | 558 | ops->start = nmi_start; |
476 | ops->stop = nmi_stop; | 559 | ops->stop = nmi_stop; |
477 | ops->cpu_type = cpu_type; | 560 | ops->cpu_type = cpu_type; |
561 | ops->switch_events = nmi_switch_event; | ||
478 | 562 | ||
479 | if (model->init) | 563 | if (model->init) |
480 | ret = model->init(ops); | 564 | ret = model->init(ops); |