author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:18:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:18:07 -0400
commit     3aaf51ace5975050ab43c7d4d7e439e0ae7d13d7 (patch)
tree       3ceb741d8b78c6dc78be3fd2e4f8aac443044787 /arch
parent     f262af3d08d3fffc4e11277d3a177b2d67ea2aba (diff)
parent     cc49b092d308f8ea8634134b0d95d831a88a674b (diff)
Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
oprofile/x86: make AMD IBS hotplug capable
oprofile/x86: notify cpus only when daemon is running
oprofile/x86: reordering some functions
oprofile/x86: stop disabled counters in nmi handler
oprofile/x86: protect cpu hotplug sections
oprofile/x86: remove CONFIG_SMP macros
oprofile/x86: fix uninitialized counter usage during cpu hotplug
oprofile/x86: remove duplicate IBS capability check
oprofile/x86: move IBS code
oprofile/x86: return -EBUSY if counters are already reserved
oprofile/x86: moving shutdown functions
oprofile/x86: reserve counter msrs pairwise
oprofile/x86: rework error handler in nmi_setup()
oprofile: update file list in MAINTAINERS file
oprofile: protect from not being in an IRQ context
oprofile: remove double ring buffering
ring-buffer: Add lost event count to end of sub buffer
tracing: Show the lost events in the trace_pipe output
ring-buffer: Add place holder recording of dropped events
tracing: Fix compile error in module tracepoints when MODULE_UNLOAD not set
...
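
The hotplug commits above all converge on one scheme: two global flags — nmi_enabled ("profiler is set up") and ctr_running ("counters are ticking") — plus a CPU notifier that replays the current state onto a CPU coming online and unwinds it before one goes down. Below is a minimal sketch of that shape, condensed from the nmi_int.c changes further down; the demo_* names are placeholders, while the notifier and smp calls are the 2.6.34-era kernel APIs the diff itself uses.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>

static int demo_enabled;	/* profiler set up */
static int demo_running;	/* counters actually ticking */

static void demo_cpu_setup(void *d)    { /* program counter MSRs */ }
static void demo_cpu_shutdown(void *d) { /* restore counter MSRs */ }
static void demo_cpu_start(void *d)    { /* enable counters */ }
static void demo_cpu_stop(void *d)     { /* disable counters */ }

/* Bring a hotplugged CPU up to the state every other CPU is in. */
static void demo_cpu_up(void *d)
{
	if (demo_enabled)
		demo_cpu_setup(d);
	if (demo_running)
		demo_cpu_start(d);
}

/* Unwind in reverse order before the CPU disappears. */
static void demo_cpu_down(void *d)
{
	if (demo_running)
		demo_cpu_stop(d);
	if (demo_enabled)
		demo_cpu_shutdown(d);
}

static int demo_cpu_notifier(struct notifier_block *b, unsigned long action,
			     void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, demo_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, demo_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

Writers of the two flags take get_online_cpus() first (see nmi_start()/nmi_stop() in the diff), so the notifier and the on_each_cpu() broadcasts never observe a half-updated state.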
Diffstat (limited to 'arch')
 arch/x86/oprofile/nmi_int.c       | 199
 arch/x86/oprofile/op_model_amd.c  | 280
 arch/x86/oprofile/op_model_p4.c   |  52
 arch/x86/oprofile/op_model_ppro.c |  77
 arch/x86/oprofile/op_x86_model.h  |   4
 5 files changed, 321 insertions(+), 291 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee71014..b28d2f1253bb 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-/* 0 == registered but off, 1 == registered and on */
-static int nmi_enabled = 0;
+/* must be protected with get_online_cpus()/put_online_cpus(): */
+static int nmi_enabled;
+static int ctr_running;
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self,
 {
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
-	int cpu = smp_processor_id();
 
 	switch (val) {
 	case DIE_NMI:
 	case DIE_NMI_IPI:
-		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+		if (ctr_running)
+			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
+		else if (!nmi_enabled)
+			break;
+		else
+			model->stop(&__get_cpu_var(cpu_msrs));
 		ret = NOTIFY_STOP;
 		break;
 	default:
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_cpu_start(void *dummy)
 {
 	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-	model->start(msrs);
+	if (!msrs->controls)
+		WARN_ON_ONCE(1);
+	else
+		model->start(msrs);
 }
 
 static int nmi_start(void)
 {
+	get_online_cpus();
 	on_each_cpu(nmi_cpu_start, NULL, 1);
+	ctr_running = 1;
+	put_online_cpus();
 	return 0;
 }
 
 static void nmi_cpu_stop(void *dummy)
 {
 	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-	model->stop(msrs);
+	if (!msrs->controls)
+		WARN_ON_ONCE(1);
+	else
+		model->stop(msrs);
 }
 
 static void nmi_stop(void)
 {
+	get_online_cpus();
 	on_each_cpu(nmi_cpu_stop, NULL, 1);
+	ctr_running = 0;
+	put_online_cpus();
 }
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -252,7 +269,10 @@ static int nmi_switch_event(void)
 	if (nmi_multiplex_on() < 0)
 		return -EINVAL; /* not necessary */
 
-	on_each_cpu(nmi_cpu_switch, NULL, 1);
+	get_online_cpus();
+	if (ctr_running)
+		on_each_cpu(nmi_cpu_switch, NULL, 1);
+	put_online_cpus();
 
 	return 0;
 }
@@ -295,6 +315,7 @@ static void free_msrs(void)
 		kfree(per_cpu(cpu_msrs, i).controls);
 		per_cpu(cpu_msrs, i).controls = NULL;
 	}
+	nmi_shutdown_mux();
 }
 
 static int allocate_msrs(void)
@@ -307,14 +328,21 @@ static int allocate_msrs(void)
 		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
 							GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).counters)
-			return 0;
+			goto fail;
 		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
 							GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).controls)
-			return 0;
+			goto fail;
 	}
 
+	if (!nmi_setup_mux())
+		goto fail;
+
 	return 1;
+
+fail:
+	free_msrs();
+	return 0;
 }
 
 static void nmi_cpu_setup(void *dummy)
@@ -336,49 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
 	.priority = 2
 };
 
-static int nmi_setup(void)
-{
-	int err = 0;
-	int cpu;
-
-	if (!allocate_msrs())
-		err = -ENOMEM;
-	else if (!nmi_setup_mux())
-		err = -ENOMEM;
-	else
-		err = register_die_notifier(&profile_exceptions_nb);
-
-	if (err) {
-		free_msrs();
-		nmi_shutdown_mux();
-		return err;
-	}
-
-	/* We need to serialize save and setup for HT because the subset
-	 * of msrs are distinct for save and setup operations
-	 */
-
-	/* Assume saved/restored counters are the same on all CPUs */
-	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
-	for_each_possible_cpu(cpu) {
-		if (!cpu)
-			continue;
-
-		memcpy(per_cpu(cpu_msrs, cpu).counters,
-		       per_cpu(cpu_msrs, 0).counters,
-		       sizeof(struct op_msr) * model->num_counters);
-
-		memcpy(per_cpu(cpu_msrs, cpu).controls,
-		       per_cpu(cpu_msrs, 0).controls,
-		       sizeof(struct op_msr) * model->num_controls);
-
-		mux_clone(cpu);
-	}
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
-	nmi_enabled = 1;
-	return 0;
-}
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -412,20 +397,24 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_cpu_restore_registers(msrs);
+	if (model->cpu_down)
+		model->cpu_down();
 }
 
-static void nmi_shutdown(void)
+static void nmi_cpu_up(void *dummy)
 {
-	struct op_msrs *msrs;
+	if (nmi_enabled)
+		nmi_cpu_setup(dummy);
+	if (ctr_running)
+		nmi_cpu_start(dummy);
+}
 
-	nmi_enabled = 0;
-	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
-	unregister_die_notifier(&profile_exceptions_nb);
-	nmi_shutdown_mux();
-	msrs = &get_cpu_var(cpu_msrs);
-	model->shutdown(msrs);
-	free_msrs();
-	put_cpu_var(cpu_msrs);
+static void nmi_cpu_down(void *dummy)
+{
+	if (ctr_running)
+		nmi_cpu_stop(dummy);
+	if (nmi_enabled)
+		nmi_cpu_shutdown(dummy);
 }
 
 static int nmi_create_files(struct super_block *sb, struct dentry *root)
@@ -457,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 	return 0;
 }
 
-#ifdef CONFIG_SMP
 static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 				 void *data)
 {
@@ -465,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 	switch (action) {
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
 		break;
 	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
 		break;
 	}
 	return NOTIFY_DONE;
@@ -477,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 static struct notifier_block oprofile_cpu_nb = {
 	.notifier_call = oprofile_cpu_notifier
 };
-#endif
+
+static int nmi_setup(void)
+{
+	int err = 0;
+	int cpu;
+
+	if (!allocate_msrs())
+		return -ENOMEM;
+
+	/* We need to serialize save and setup for HT because the subset
+	 * of msrs are distinct for save and setup operations
+	 */
+
+	/* Assume saved/restored counters are the same on all CPUs */
+	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+	if (err)
+		goto fail;
+
+	for_each_possible_cpu(cpu) {
+		if (!cpu)
+			continue;
+
+		memcpy(per_cpu(cpu_msrs, cpu).counters,
+		       per_cpu(cpu_msrs, 0).counters,
+		       sizeof(struct op_msr) * model->num_counters);
+
+		memcpy(per_cpu(cpu_msrs, cpu).controls,
+		       per_cpu(cpu_msrs, 0).controls,
+		       sizeof(struct op_msr) * model->num_controls);
+
+		mux_clone(cpu);
+	}
+
+	nmi_enabled = 0;
+	ctr_running = 0;
+	barrier();
+	err = register_die_notifier(&profile_exceptions_nb);
+	if (err)
+		goto fail;
+
+	get_online_cpus();
+	register_cpu_notifier(&oprofile_cpu_nb);
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
+	nmi_enabled = 1;
+	put_online_cpus();
+
+	return 0;
+fail:
+	free_msrs();
+	return err;
+}
+
+static void nmi_shutdown(void)
+{
+	struct op_msrs *msrs;
+
+	get_online_cpus();
+	unregister_cpu_notifier(&oprofile_cpu_nb);
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+	nmi_enabled = 0;
+	ctr_running = 0;
+	put_online_cpus();
+	barrier();
+	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
+	model->shutdown(msrs);
+	free_msrs();
+	put_cpu_var(cpu_msrs);
+}
 
 #ifdef CONFIG_PM
 
@@ -687,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		return -ENODEV;
 	}
 
-#ifdef CONFIG_SMP
-	register_cpu_notifier(&oprofile_cpu_nb);
-#endif
 	/* default values, can be overwritten by model */
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
@@ -716,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 void op_nmi_exit(void)
 {
-	if (using_nmi) {
+	if (using_nmi)
 		exit_sysfs();
-#ifdef CONFIG_SMP
-		unregister_cpu_notifier(&oprofile_cpu_nb);
-#endif
-	}
-	if (model->exit)
-		model->exit();
 }
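
One idiom worth pulling out of the rewritten nmi_int.c: allocate_msrs() now funnels every failure through a single fail: label, which works because free_msrs() NULLs what it frees and kfree(NULL) is a no-op, so the cleanup is safe after any subset of the per-CPU allocations has succeeded. nmi_setup() reuses the same shape for the fill_in_addresses and register_die_notifier failures, which is also why the mux setup/teardown moved into allocate_msrs()/free_msrs(). A stripped-down sketch with hypothetical demo_* names:

#include <linux/slab.h>

static void *demo_counters;
static void *demo_controls;

static void demo_free(void)
{
	/* kfree(NULL) is a no-op, so this is safe after partial failure */
	kfree(demo_counters);
	demo_counters = NULL;
	kfree(demo_controls);
	demo_controls = NULL;
}

static int demo_allocate(size_t counters_size, size_t controls_size)
{
	demo_counters = kzalloc(counters_size, GFP_KERNEL);
	if (!demo_counters)
		goto fail;
	demo_controls = kzalloc(controls_size, GFP_KERNEL);
	if (!demo_controls)
		goto fail;
	return 1;	/* like allocate_msrs(): 1 on success, 0 on failure */

fail:
	demo_free();
	return 0;
}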
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 090cbbec7dbd..b67a6b5aa8d4 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -30,13 +30,10 @@
 #include "op_counter.h"
 
 #define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 #define NUM_VIRT_COUNTERS 32
-#define NUM_VIRT_CONTROLS 32
 #else
 #define NUM_VIRT_COUNTERS NUM_COUNTERS
-#define NUM_VIRT_CONTROLS NUM_CONTROLS
 #endif
 
 #define OP_EVENT_MASK 0x0FFF
@@ -105,102 +102,6 @@ static u32 get_ibs_caps(void)
 	return ibs_caps;
 }
 
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
-			       struct op_msrs const * const msrs)
-{
-	u64 val;
-	int i;
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		int virt = op_x86_phys_to_virt(i);
-		if (!reset_value[virt])
-			continue;
-		rdmsrl(msrs->controls[i].addr, val);
-		val &= model->reserved;
-		val |= op_x86_get_ctrl(model, &counter_config[virt]);
-		wrmsrl(msrs->controls[i].addr, val);
-	}
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; i++) {
-		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-	}
-
-	for (i = 0; i < NUM_CONTROLS; i++) {
-		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-	}
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
-			      struct op_msrs const * const msrs)
-{
-	u64 val;
-	int i;
-
-	/* setup reset_value */
-	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-		if (counter_config[i].enabled
-		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
-			reset_value[i] = counter_config[i].count;
-		else
-			reset_value[i] = 0;
-	}
-
-	/* clear all counters */
-	for (i = 0; i < NUM_CONTROLS; ++i) {
-		if (unlikely(!msrs->controls[i].addr)) {
-			if (counter_config[i].enabled && !smp_processor_id())
-				/*
-				 * counter is reserved, this is on all
-				 * cpus, so report only for cpu #0
-				 */
-				op_x86_warn_reserved(i);
-			continue;
-		}
-		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
-			op_x86_warn_in_use(i);
-		val &= model->reserved;
-		wrmsrl(msrs->controls[i].addr, val);
-	}
-
-	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (unlikely(!msrs->counters[i].addr))
-			continue;
-		wrmsrl(msrs->counters[i].addr, -1LL);
-	}
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		int virt = op_x86_phys_to_virt(i);
-		if (!reset_value[virt])
-			continue;
-
-		/* setup counter registers */
-		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
-		/* setup control registers */
-		rdmsrl(msrs->controls[i].addr, val);
-		val &= model->reserved;
-		val |= op_x86_get_ctrl(model, &counter_config[virt]);
-		wrmsrl(msrs->controls[i].addr, val);
-	}
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -365,6 +266,125 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+			       struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		int virt = op_x86_phys_to_virt(i);
+		if (!reset_value[virt])
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= model->reserved;
+		val |= op_x86_get_ctrl(model, &counter_config[virt]);
+		wrmsrl(msrs->controls[i].addr, val);
+	}
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if (!msrs->counters[i].addr)
+			continue;
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; i++) {
+		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+			goto fail;
+		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+			goto fail;
+		}
+		/* both registers must be reserved */
+		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+		continue;
+	fail:
+		if (!counter_config[i].enabled)
+			continue;
+		op_x86_warn_reserved(i);
+		op_amd_shutdown(msrs);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+			      struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	/* setup reset_value */
+	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+		if (counter_config[i].enabled
+		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
+			reset_value[i] = counter_config[i].count;
+		else
+			reset_value[i] = 0;
+	}
+
+	/* clear all counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if (!msrs->controls[i].addr)
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+			op_x86_warn_in_use(i);
+		val &= model->reserved;
+		wrmsrl(msrs->controls[i].addr, val);
+		/*
+		 * avoid a false detection of ctr overflows in NMI
+		 * handler
+		 */
+		wrmsrl(msrs->counters[i].addr, -1LL);
+	}
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		int virt = op_x86_phys_to_virt(i);
+		if (!reset_value[virt])
+			continue;
+
+		/* setup counter registers */
+		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+		/* setup control registers */
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= model->reserved;
+		val |= op_x86_get_ctrl(model, &counter_config[virt]);
+		wrmsrl(msrs->controls[i].addr, val);
+	}
+
+	if (ibs_caps)
+		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static void op_amd_cpu_shutdown(void)
+{
+	if (ibs_caps)
+		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+}
+
 static int op_amd_check_ctrs(struct pt_regs * const regs,
 			     struct op_msrs const * const msrs)
 {
@@ -425,42 +445,16 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	op_amd_stop_ibs();
 }
 
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (msrs->counters[i].addr)
-			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-	}
-	for (i = 0; i < NUM_CONTROLS; ++i) {
-		if (msrs->controls[i].addr)
-			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-}
-
-static u8 ibs_eilvt_off;
-
-static inline void apic_init_ibs_nmi_per_cpu(void *arg)
-{
-	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
-}
-
-static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
-{
-	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
-}
-
-static int init_ibs_nmi(void)
+static int __init_ibs_nmi(void)
 {
 #define IBSCTL_LVTOFFSETVAL (1 << 8)
 #define IBSCTL 0x1cc
 	struct pci_dev *cpu_cfg;
 	int nodes;
 	u32 value = 0;
+	u8 ibs_eilvt_off;
 
-	/* per CPU setup */
-	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
 
 	nodes = 0;
 	cpu_cfg = NULL;
@@ -490,22 +484,15 @@ static int init_ibs_nmi(void)
 	return 0;
 }
 
-/* uninitialize the APIC for the IBS interrupts if needed */
-static void clear_ibs_nmi(void)
-{
-	if (ibs_caps)
-		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
-}
-
 /* initialize the APIC for the IBS interrupts if available */
-static void ibs_init(void)
+static void init_ibs(void)
 {
 	ibs_caps = get_ibs_caps();
 
 	if (!ibs_caps)
 		return;
 
-	if (init_ibs_nmi()) {
+	if (__init_ibs_nmi()) {
 		ibs_caps = 0;
 		return;
 	}
@@ -514,14 +501,6 @@ static void ibs_init(void)
 		(unsigned)ibs_caps);
 }
 
-static void ibs_exit(void)
-{
-	if (!ibs_caps)
-		return;
-
-	clear_ibs_nmi();
-}
-
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
 
 static int setup_ibs_files(struct super_block *sb, struct dentry *root)
@@ -570,27 +549,22 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 
 static int op_amd_init(struct oprofile_operations *ops)
 {
-	ibs_init();
+	init_ibs();
 	create_arch_files = ops->create_files;
 	ops->create_files = setup_ibs_files;
 	return 0;
 }
 
-static void op_amd_exit(void)
-{
-	ibs_exit();
-}
-
 struct op_x86_model_spec op_amd_spec = {
 	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
+	.num_controls = NUM_COUNTERS,
 	.num_virt_counters = NUM_VIRT_COUNTERS,
 	.reserved = MSR_AMD_EVENTSEL_RESERVED,
 	.event_mask = OP_EVENT_MASK,
 	.init = op_amd_init,
-	.exit = op_amd_exit,
 	.fill_in_addresses = &op_amd_fill_in_addresses,
 	.setup_ctrs = &op_amd_setup_ctrs,
+	.cpu_down = &op_amd_cpu_shutdown,
 	.check_ctrs = &op_amd_check_ctrs,
 	.start = &op_amd_start,
 	.stop = &op_amd_stop,
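
The new op_amd_fill_in_addresses() above is the "reserve counter msrs pairwise" commit in miniature: a counter slot only becomes usable if both its PERFCTR and its EVNTSEL MSR could be reserved, a half-taken pair is rolled back on the spot, and setup now fails with -EBUSY instead of silently skipping the counter. The same shape in isolation — the demo_* names are hypothetical and the sketch drops the real code's tolerance for losing a counter the user never enabled; the reserve/release helpers and MSR constants are the ones used in the diff:

#include <linux/errno.h>
#include <asm/msr-index.h>	/* MSR_K7_PERFCTR0, MSR_K7_EVNTSEL0 */
#include <asm/nmi.h>		/* reserve_perfctr_nmi() and friends */

#define DEMO_NUM_COUNTERS 4

static unsigned long demo_counter_addr[DEMO_NUM_COUNTERS];

static void demo_release(void)
{
	int i;

	for (i = 0; i < DEMO_NUM_COUNTERS; ++i) {
		if (!demo_counter_addr[i])
			continue;
		/* release both halves of a pair together */
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static int demo_reserve(void)
{
	int i;

	for (i = 0; i < DEMO_NUM_COUNTERS; ++i) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			/* roll back the half-taken pair */
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			goto fail;
		}
		demo_counter_addr[i] = MSR_K7_PERFCTR0 + i;
		continue;
	fail:
		demo_release();
		return -EBUSY;
	}

	return 0;
}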
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index e6a160a4684a..182558dd5515 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -385,8 +385,26 @@ static unsigned int get_stagger(void)
 
 static unsigned long reset_value[NUM_COUNTERS_NON_HT];
 
+static void p4_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
 
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+	for (i = 0; i < num_counters; ++i) {
+		if (msrs->counters[i].addr)
+			release_perfctr_nmi(msrs->counters[i].addr);
+	}
+	/*
+	 * some of the control registers are specially reserved in
+	 * conjunction with the counter registers (hence the starting offset).
+	 * This saves a few bits.
+	 */
+	for (i = num_counters; i < num_controls; ++i) {
+		if (msrs->controls[i].addr)
+			release_evntsel_nmi(msrs->controls[i].addr);
+	}
+}
+
+static int p4_fill_in_addresses(struct op_msrs * const msrs)
 {
 	unsigned int i;
 	unsigned int addr, cccraddr, stag;
@@ -468,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
 			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
 		}
 	}
+
+	for (i = 0; i < num_counters; ++i) {
+		if (!counter_config[i].enabled)
+			continue;
+		if (msrs->controls[i].addr)
+			continue;
+		op_x86_warn_reserved(i);
+		p4_shutdown(msrs);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 
@@ -668,26 +698,6 @@ static void p4_stop(struct op_msrs const * const msrs)
 	}
 }
 
-static void p4_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0; i < num_counters; ++i) {
-		if (msrs->counters[i].addr)
-			release_perfctr_nmi(msrs->counters[i].addr);
-	}
-	/*
-	 * some of the control registers are specially reserved in
-	 * conjunction with the counter registers (hence the starting offset).
-	 * This saves a few bits.
-	 */
-	for (i = num_counters; i < num_controls; ++i) {
-		if (msrs->controls[i].addr)
-			release_evntsel_nmi(msrs->controls[i].addr);
-	}
-}
-
-
 #ifdef CONFIG_SMP
 struct op_x86_model_spec op_p4_ht2_spec = {
 	.num_counters = NUM_COUNTERS_HT2,
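
P4 is the one model where counters and controls are not symmetric, which is why p4_shutdown() (moved above its first caller in this diff) releases them over two different index ranges: per its own comment, some control registers are reserved in conjunction with the counter registers, so only the controls from index num_counters upward were reserved separately and only those get an explicit release. A toy restatement of just that index split, with hypothetical demo sizes and arrays:

#include <asm/nmi.h>	/* release_perfctr_nmi(), release_evntsel_nmi() */

/* hypothetical sizes; the real values come from the P4 model spec */
#define DEMO_NUM_COUNTERS 8
#define DEMO_NUM_CONTROLS 16

static unsigned long demo_counter_addr[DEMO_NUM_COUNTERS];
static unsigned long demo_control_addr[DEMO_NUM_CONTROLS];

static void demo_p4_release(void)
{
	int i;

	/* slots 0..num_counters-1: reserved via the counter register */
	for (i = 0; i < DEMO_NUM_COUNTERS; ++i) {
		if (demo_counter_addr[i])
			release_perfctr_nmi(demo_counter_addr[i]);
	}
	/* only the controls past num_counters were reserved on their own */
	for (i = DEMO_NUM_COUNTERS; i < DEMO_NUM_CONTROLS; ++i) {
		if (demo_control_addr[i])
			release_evntsel_nmi(demo_control_addr[i]);
	}
}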
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 2bf90fafa7b5..1fd17cfb956b 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -30,19 +30,46 @@ static int counter_width = 32;
 
 static u64 *reset_value;
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void ppro_shutdown(struct op_msrs const * const msrs)
 {
 	int i;
 
-	for (i = 0; i < num_counters; i++) {
-		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+	for (i = 0; i < num_counters; ++i) {
+		if (!msrs->counters[i].addr)
+			continue;
+		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
+	}
+	if (reset_value) {
+		kfree(reset_value);
+		reset_value = NULL;
 	}
+}
+
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
 
 	for (i = 0; i < num_counters; i++) {
-		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
+			goto fail;
+		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
+			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+			goto fail;
+		}
+		/* both registers must be reserved */
+		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+		continue;
+	fail:
+		if (!counter_config[i].enabled)
+			continue;
+		op_x86_warn_reserved(i);
+		ppro_shutdown(msrs);
+		return -EBUSY;
 	}
+
+	return 0;
 }
 
 
@@ -78,26 +105,17 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 
 	/* clear all counters */
 	for (i = 0; i < num_counters; ++i) {
-		if (unlikely(!msrs->controls[i].addr)) {
-			if (counter_config[i].enabled && !smp_processor_id())
-				/*
-				 * counter is reserved, this is on all
-				 * cpus, so report only for cpu #0
-				 */
-				op_x86_warn_reserved(i);
+		if (!msrs->controls[i].addr)
 			continue;
-		}
 		rdmsrl(msrs->controls[i].addr, val);
 		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
-	}
-
-	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < num_counters; ++i) {
-		if (unlikely(!msrs->counters[i].addr))
-			continue;
+		/*
+		 * avoid a false detection of ctr overflows in NMI *
+		 * handler
+		 */
 		wrmsrl(msrs->counters[i].addr, -1LL);
 	}
 
@@ -189,25 +207,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	}
 }
 
-static void ppro_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0; i < num_counters; ++i) {
-		if (msrs->counters[i].addr)
-			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
-	}
-	for (i = 0; i < num_counters; ++i) {
-		if (msrs->controls[i].addr)
-			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
-	}
-	if (reset_value) {
-		kfree(reset_value);
-		reset_value = NULL;
-	}
-}
-
-
 struct op_x86_model_spec op_ppro_spec = {
 	.num_counters = 2,
 	.num_controls = 2,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index ff82a755edd4..89017fa1fd63 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -40,10 +40,10 @@ struct op_x86_model_spec {
 	u64 reserved;
 	u16 event_mask;
 	int (*init)(struct oprofile_operations *ops);
-	void (*exit)(void);
-	void (*fill_in_addresses)(struct op_msrs * const msrs);
+	int (*fill_in_addresses)(struct op_msrs * const msrs);
 	void (*setup_ctrs)(struct op_x86_model_spec const *model,
 			   struct op_msrs const * const msrs);
+	void (*cpu_down)(void);
 	int (*check_ctrs)(struct pt_regs * const regs,
 			  struct op_msrs const * const msrs);
 	void (*start)(struct op_msrs const * const msrs);
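
The header change is the series' interface summary: fill_in_addresses now returns an int so reservation failures (the -EBUSY paths above) propagate out of nmi_setup(), and cpu_down is a new optional per-CPU teardown hook, implemented only by the AMD model to park the IBS APIC entry. A sketch of how the nmi_int.c core consumes the two hooks, condensed from the hunks above (the demo_* wrappers are hypothetical; model, cpu_msrs and per_cpu() are the declarations from nmi_int.c):

/* from nmi_setup(): a reservation failure aborts the whole setup and
 * the errno reaches the oprofile daemon */
static int demo_setup(void)
{
	int err;

	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		return err;
	/* ... clone the MSR layout to other CPUs, register notifiers ... */
	return 0;
}

/* from nmi_cpu_shutdown(): cpu_down is optional, so the core checks it */
static void demo_cpu_teardown(void)
{
	/* ... restore LVT entries and counter MSRs ... */
	if (model->cpu_down)
		model->cpu_down();
}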