-rw-r--r--	arch/i386/kernel/nmi.c   | 188
-rw-r--r--	arch/x86_64/kernel/nmi.c | 178
-rw-r--r--	include/asm-i386/nmi.h   |   7
-rw-r--r--	include/asm-x86_64/nmi.h |   8
4 files changed, 308 insertions, 73 deletions
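
The patch replaces the old clear_msr_range() bulk-clearing approach with per-CPU reservation bitmaps, so that the NMI watchdog and other users of the performance counters (oprofile is named in the code) can coordinate ownership of counter and event-select MSRs. A rough usage sketch of the new interface follows; the calling function is hypothetical, and only the reserve_*/release_* calls and the K7 MSR names come from the patch:

	/* illustrative caller, not part of the patch */
	static int example_grab_counter(void)
	{
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
			return 0;	/* counter bit already owned, e.g. by the NMI watchdog */
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
			/* unwind the first reservation, mirroring the fail1:/fail: labels below */
			release_perfctr_nmi(MSR_K7_PERFCTR0);
			return 0;
		}
		return 1;	/* both MSRs may now be programmed */
	}

Both reserve functions return 1 on success and 0 when the bit is already taken; the ownership bitmaps are per-CPU (DEFINE_PER_CPU), so a real caller repeats the reservation on every CPU it intends to use. The reworked setup_k7/p6/p4_watchdog() functions below follow exactly this reserve-then-unwind pattern.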
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1282d70ff971..5d58dfeacd59 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -34,6 +34,20 @@ static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
 static unsigned int nmi_p4_cccr_val;
 extern void show_registers(struct pt_regs *regs);
 
+/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
+ * evtsel_nmi_owner tracks the ownership of the event selection
+ * - different performance counters/ event selection may be reserved for
+ *   different subsystems this reservation system just tries to coordinate
+ *   things a little
+ */
+static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
+static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
+
+/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
+ * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
+ */
+#define NMI_MAX_COUNTER_BITS 66
+
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
  * - it may be reserved by some other driver, or not
@@ -95,6 +109,105 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the performance counter register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_PERFCTR0);
+	case X86_VENDOR_INTEL:
+		switch (boot_cpu_data.x86) {
+		case 6:
+			return (msr - MSR_P6_PERFCTR0);
+		case 15:
+			return (msr - MSR_P4_BPU_PERFCTR0);
+		}
+	}
+	return 0;
+}
+
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the event selection register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_EVNTSEL0);
+	case X86_VENDOR_INTEL:
+		switch (boot_cpu_data.x86) {
+		case 6:
+			return (msr - MSR_P6_EVNTSEL0);
+		case 15:
+			return (msr - MSR_P4_BSU_ESCR0);
+		}
+	}
+	return 0;
+}
+
+/* checks for a bit availability (hack for oprofile) */
+int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
+{
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+/* checks the an msr for availability */
+int avail_to_resrv_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+		return 1;
+	return 0;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+}
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -344,14 +457,6 @@ late_initcall(init_lapic_nmi_sysfs);
  * Original code written by Keith Owens.
  */
 
-static void clear_msr_range(unsigned int base, unsigned int n)
-{
-	unsigned int i;
-
-	for(i = 0; i < n; ++i)
-		wrmsr(base+i, 0, 0);
-}
-
 static void write_watchdog_counter(const char *descr)
 {
 	u64 count = (u64)cpu_khz * 1000;
@@ -362,14 +467,19 @@ static void write_watchdog_counter(const char *descr)
 	wrmsrl(nmi_perfctr_msr, 0 - count);
 }
 
-static void setup_k7_watchdog(void)
+static int setup_k7_watchdog(void)
 {
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_K7_PERFCTR0;
 
-	clear_msr_range(MSR_K7_EVNTSEL0, 4);
-	clear_msr_range(MSR_K7_PERFCTR0, 4);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
+		goto fail1;
+
+	wrmsrl(MSR_K7_PERFCTR0, 0UL);
 
 	evntsel = K7_EVNTSEL_INT
 		| K7_EVNTSEL_OS
@@ -381,16 +491,24 @@ static void setup_k7_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= K7_EVNTSEL_ENABLE;
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
+	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
-static void setup_p6_watchdog(void)
+static int setup_p6_watchdog(void)
 {
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_P6_PERFCTR0;
 
-	clear_msr_range(MSR_P6_EVNTSEL0, 2);
-	clear_msr_range(MSR_P6_PERFCTR0, 2);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0))
+		goto fail1;
 
 	evntsel = P6_EVNTSEL_INT
 		| P6_EVNTSEL_OS
@@ -402,6 +520,11 @@ static void setup_p6_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= P6_EVNTSEL0_ENABLE;
 	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 static int setup_p4_watchdog(void)
@@ -419,22 +542,11 @@ static int setup_p4_watchdog(void)
 		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
 #endif
 
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
-		clear_msr_range(0x3F1, 2);
-	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
-	   docs doesn't fully define it, so leave it alone for now. */
-	if (boot_cpu_data.x86_model >= 0x3) {
-		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
-		clear_msr_range(0x3A0, 26);
-		clear_msr_range(0x3BC, 3);
-	} else {
-		clear_msr_range(0x3A0, 31);
-	}
-	clear_msr_range(0x3C0, 6);
-	clear_msr_range(0x3C8, 6);
-	clear_msr_range(0x3E0, 2);
-	clear_msr_range(MSR_P4_CCCR0, 18);
-	clear_msr_range(MSR_P4_PERFCTR0, 18);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
+		goto fail1;
 
 	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
 	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
@@ -442,6 +554,10 @@ static int setup_p4_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 void setup_apic_nmi_watchdog (void)
@@ -450,7 +566,8 @@ void setup_apic_nmi_watchdog (void)
 	case X86_VENDOR_AMD:
 		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
 			return;
-		setup_k7_watchdog();
+		if (!setup_k7_watchdog())
+			return;
 		break;
 	case X86_VENDOR_INTEL:
 		switch (boot_cpu_data.x86) {
@@ -458,7 +575,8 @@ void setup_apic_nmi_watchdog (void)
 			if (boot_cpu_data.x86_model > 0xd)
 				return;
 
-			setup_p6_watchdog();
+			if(!setup_p6_watchdog())
+				return;
 			break;
 		case 15:
 			if (boot_cpu_data.x86_model > 0x4)
@@ -612,6 +730,12 @@ int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file,
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+EXPORT_SYMBOL(reserve_perfctr_nmi);
+EXPORT_SYMBOL(release_perfctr_nmi);
+EXPORT_SYMBOL(reserve_evntsel_nmi);
+EXPORT_SYMBOL(release_evntsel_nmi);
 EXPORT_SYMBOL(reserve_lapic_nmi);
 EXPORT_SYMBOL(release_lapic_nmi);
 EXPORT_SYMBOL(disable_timer_nmi_watchdog);
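
A note on the NMI_MAX_COUNTER_BITS value of 66 used in both nmi.c files: an event-select MSR is mapped to its reservation bit by subtracting MSR_P4_BSU_ESCR0, and the comment states the maximum comes from MSR_P4_CRU_ESCR5. Assuming the conventional NetBurst MSR addresses (they are not part of the patch and are quoted here only for the arithmetic):

	/* assumed addresses, for the 66-bit bound only:          */
	/* MSR_P4_BSU_ESCR0 = 0x3a0, MSR_P4_CRU_ESCR5 = 0x3e1     */
	/* 0x3e1 - 0x3a0 = 0x41 = 65, the highest reservation bit */
	/* bit positions 0..65 give 66 distinct bits              */

This is also why the i386 code above sizes evntsel_nmi_owner as an array of three 32-bit longs (96 bits) instead of a single word.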
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 42c05d6907b9..b7a7c9973849 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,6 +27,20 @@
 #include <asm/kdebug.h>
 #include <asm/mce.h>
 
+/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
+ * evtsel_nmi_owner tracks the ownership of the event selection
+ * - different performance counters/ event selection may be reserved for
+ *   different subsystems this reservation system just tries to coordinate
+ *   things a little
+ */
+static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
+static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
+
+/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
+ * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
+ */
+#define NMI_MAX_COUNTER_BITS 66
+
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
  * - it may be reserved by some other driver, or not
@@ -90,6 +104,95 @@ static unsigned int nmi_p4_cccr_val;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the performance counter register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_PERFCTR0);
+	case X86_VENDOR_INTEL:
+		return (msr - MSR_P4_BPU_PERFCTR0);
+	}
+	return 0;
+}
+
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the event selection register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_EVNTSEL0);
+	case X86_VENDOR_INTEL:
+		return (msr - MSR_P4_BSU_ESCR0);
+	}
+	return 0;
+}
+
+/* checks for a bit availability (hack for oprofile) */
+int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
+{
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+/* checks the an msr for availability */
+int avail_to_resrv_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+}
+
 static __cpuinit inline int nmi_known_cpu(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -325,34 +428,22 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif	/* CONFIG_PM */
 
-/*
- * Activate the NMI watchdog via the local APIC.
- * Original code written by Keith Owens.
- */
-
-static void clear_msr_range(unsigned int base, unsigned int n)
+static int setup_k7_watchdog(void)
 {
-	unsigned int i;
-
-	for(i = 0; i < n; ++i)
-		wrmsr(base+i, 0, 0);
-}
-
-static void setup_k7_watchdog(void)
-{
-	int i;
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_K7_PERFCTR0;
 
-	for(i = 0; i < 4; ++i) {
-		/* Simulator may not support it */
-		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
-			nmi_perfctr_msr = 0;
-			return;
-		}
-		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
-	}
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
+		goto fail1;
+
+	/* Simulator may not support it */
+	if (checking_wrmsrl(MSR_K7_EVNTSEL0, 0UL))
+		goto fail2;
+	wrmsrl(MSR_K7_PERFCTR0, 0UL);
 
 	evntsel = K7_EVNTSEL_INT
 		| K7_EVNTSEL_OS
@@ -364,6 +455,13 @@ static void setup_k7_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= K7_EVNTSEL_ENABLE;
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
+	return 1;
+fail2:
+	release_evntsel_nmi(MSR_K7_EVNTSEL0);
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 
@@ -382,22 +480,11 @@ static int setup_p4_watchdog(void)
 		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
 #endif
 
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
-		clear_msr_range(0x3F1, 2);
-	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
-	   docs doesn't fully define it, so leave it alone for now. */
-	if (boot_cpu_data.x86_model >= 0x3) {
-		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
-		clear_msr_range(0x3A0, 26);
-		clear_msr_range(0x3BC, 3);
-	} else {
-		clear_msr_range(0x3A0, 31);
-	}
-	clear_msr_range(0x3C0, 6);
-	clear_msr_range(0x3C8, 6);
-	clear_msr_range(0x3E0, 2);
-	clear_msr_range(MSR_P4_CCCR0, 18);
-	clear_msr_range(MSR_P4_PERFCTR0, 18);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
+		goto fail1;
 
 	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
 	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
@@ -406,6 +493,10 @@ static int setup_p4_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 void setup_apic_nmi_watchdog(void)
@@ -416,7 +507,8 @@ void setup_apic_nmi_watchdog(void)
 			return;
 		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
 			return;
-		setup_k7_watchdog();
+		if (!setup_k7_watchdog())
+			return;
 		break;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 != 15)
@@ -588,6 +680,12 @@ int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+EXPORT_SYMBOL(reserve_perfctr_nmi);
+EXPORT_SYMBOL(release_perfctr_nmi);
+EXPORT_SYMBOL(reserve_evntsel_nmi);
+EXPORT_SYMBOL(release_evntsel_nmi);
 EXPORT_SYMBOL(reserve_lapic_nmi);
 EXPORT_SYMBOL(release_lapic_nmi);
 EXPORT_SYMBOL(disable_timer_nmi_watchdog);
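
Both nmi.c files export the new interface with EXPORT_SYMBOL, and the header changes below add matching declarations, so modules can probe counter ownership without reserving anything. The avail_to_resrv_* helpers return nonzero while the bit is still free. A sketch of such a probe; the surrounding call site and message are illustrative, only the two avail_to_resrv_* calls and the MSR name come from the patch:

	/* illustrative probe, e.g. from a profiling module */
	if (!avail_to_resrv_perfctr_nmi_bit(0) ||
	    !avail_to_resrv_perfctr_nmi(MSR_K7_PERFCTR0))
		printk(KERN_INFO "perfctr 0 is busy, falling back to timer mode\n");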
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 67d994799999..27fc9e6f630e 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -25,6 +25,13 @@ void set_nmi_callback(nmi_callback_t callback);
  */
 void unset_nmi_callback(void);
 
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
 extern void setup_apic_nmi_watchdog (void);
 extern int reserve_lapic_nmi(void);
 extern void release_lapic_nmi(void);
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index efb45c894d76..62a784cb8f0c 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -56,7 +56,13 @@ extern int panic_on_timeout;
 extern int unknown_nmi_panic;
 
 extern int check_nmi_watchdog(void);
-
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
 extern void setup_apic_nmi_watchdog (void);
 extern int reserve_lapic_nmi(void);
 extern void release_lapic_nmi(void);