Diffstat (limited to 'arch/i386/kernel/nmi.c')
 -rw-r--r--  arch/i386/kernel/nmi.c  98
 1 file changed, 80 insertions(+), 18 deletions(-)

diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1a6f8bb8881c..5d8a07c20281 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -185,7 +185,8 @@ static __cpuinit inline int nmi_known_cpu(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
+		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)
+			|| (boot_cpu_data.x86 == 16));
 	case X86_VENDOR_INTEL:
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 			return 1;
@@ -216,6 +217,28 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
+static unsigned int adjust_for_32bit_ctr(unsigned int hz)
+{
+	u64 counter_val;
+	unsigned int retval = hz;
+
+	/*
+	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
+	 * are writable, with higher bits sign extending from bit 31.
+	 * So, we can only program the counter with 31 bit values and
+	 * 32nd bit should be 1, for 33.. to be 1.
+	 * Find the appropriate nmi_hz
+	 */
+	counter_val = (u64)cpu_khz * 1000;
+	do_div(counter_val, retval);
+	if (counter_val > 0x7fffffffULL) {
+		u64 count = (u64)cpu_khz * 1000;
+		do_div(count, 0x7fffffffUL);
+		retval = count + 1;
+	}
+	return retval;
+}
+
 static int __init check_nmi_watchdog(void)
 {
 	unsigned int *prev_nmi_count;
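
The helper above centralizes the 31-bit clamp that check_nmi_watchdog() used to open-code (the next hunk deletes the old copy). As a worked example, here is a minimal user-space sketch of the same arithmetic; the 2.2 GHz figure (cpu_khz == 2200000) is hypothetical, and plain 64-bit division stands in for the kernel's do_div():

	#include <stdio.h>
	#include <stdint.h>

	/* Sketch of adjust_for_32bit_ctr(): if one reload period worth of
	 * cycles does not fit in 31 bits, raise the NMI frequency until
	 * it does. */
	static unsigned int adjust_for_32bit_ctr(uint64_t cpu_khz, unsigned int hz)
	{
		uint64_t counter_val = cpu_khz * 1000 / hz;

		if (counter_val > 0x7fffffffULL)
			hz = cpu_khz * 1000 / 0x7fffffffULL + 1;
		return hz;
	}

	int main(void)
	{
		/* 2.2e9 cycles/s > 0x7fffffff, so nmi_hz 1 becomes 2 */
		printf("%u\n", adjust_for_32bit_ctr(2200000, 1));
		return 0;
	}

On any CPU faster than about 2.147 GHz the one-second reload value no longer fits in 31 bits, so nmi_hz is bumped from 1 to 2 and the watchdog simply fires twice per second.
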
@@ -281,18 +304,10 @@ static int __init check_nmi_watchdog(void)
 		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
 		nmi_hz = 1;
-		/*
-		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-		 * are writable, with higher bits sign extending from bit 31.
-		 * So, we can only program the counter with 31 bit values and
-		 * 32nd bit should be 1, for 33.. to be 1.
-		 * Find the appropriate nmi_hz
-		 */
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
-			((u64)cpu_khz * 1000) > 0x7fffffffULL) {
-			u64 count = (u64)cpu_khz * 1000;
-			do_div(count, 0x7fffffffUL);
-			nmi_hz = count + 1;
+
+		if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
+		    wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
 		}
 	}
 
@@ -369,6 +384,34 @@ void enable_timer_nmi_watchdog(void)
 	}
 }
 
+static void __acpi_nmi_disable(void *__unused)
+{
+	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
+/*
+ * Disable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_disable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+}
+
+static void __acpi_nmi_enable(void *__unused)
+{
+	apic_write_around(APIC_LVT0, APIC_DM_NMI);
+}
+
+/*
+ * Enable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_enable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+}
+
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
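
Both new entry points broadcast a masked or unmasked NMI setting for APIC_LVT0 to every CPU. The four-argument on_each_cpu(func, info, retry, wait) form is the one this kernel generation uses; passing wait == 1 means the call returns only after each CPU has performed the write. A hypothetical caller (illustrative only, not part of this patch) would bracket work that must not race with timer-based NMIs:

	/* Hypothetical usage sketch: quiesce the IO-APIC timer watchdog
	 * around a fragile step, then re-arm it. */
	static void do_fragile_work(void)
	{
		acpi_nmi_disable();	/* mask LVT0 NMI delivery on all CPUs */
		/* ... code that must not see watchdog NMIs ... */
		acpi_nmi_enable();	/* unmask LVT0 on all CPUs */
	}

Note that both wrappers are no-ops unless the watchdog is active and in NMI_IO_APIC mode, so callers need no mode check of their own.
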
@@ -442,6 +485,17 @@ static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
 	wrmsrl(perfctr_msr, 0 - count);
 }
 
+static void write_watchdog_counter32(unsigned int perfctr_msr,
+		const char *descr)
+{
+	u64 count = (u64)cpu_khz * 1000;
+
+	do_div(count, nmi_hz);
+	if(descr)
+		Dprintk("setting %s to -0x%08Lx\n", descr, count);
+	wrmsr(perfctr_msr, (u32)(-count), 0);
+}
+
 /* Note that these events don't tick when the CPU idles. This means
    the frequency varies with CPU load. */
 
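
Unlike write_watchdog_counter(), which programs the full counter with a 64-bit wrmsrl(), the 32-bit variant writes only the low word via wrmsr(perfctr_msr, lo, 0) and relies on the hardware sign-extending from bit 31, which is why nmi_hz must first be clamped so the count fits in 31 bits. Worked numbers, again assuming a hypothetical 2.2 GHz CPU with nmi_hz already adjusted to 2:

	#include <stdio.h>
	#include <stdint.h>

	/* User-space sketch of the reload value computed by
	 * write_watchdog_counter32(); the wrmsr() itself is kernel-only. */
	int main(void)
	{
		uint64_t count = 2200000ULL * 1000 / 2;	/* cpu_khz * 1000 / nmi_hz */
		uint32_t reload = (uint32_t)(-count);	/* two's complement, low 32 bits */

		/* count = 0x4190ab00, reload = 0xbe6f5500: bit 31 is set, the
		 * CPU sign-extends bits 32..63, and the counter overflows
		 * (raising the NMI) after 1.1e9 unhalted cycles. */
		printf("count=%#llx reload=%#x\n",
		       (unsigned long long)count, reload);
		return 0;
	}
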
@@ -531,7 +585,8 @@ static int setup_p6_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "P6_PERFCTR0");
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0");
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= P6_EVNTSEL0_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
@@ -704,7 +759,8 @@ static int setup_intel_arch_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "INTEL_ARCH_PERFCTR0");
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0");
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
@@ -762,7 +818,8 @@ void setup_apic_nmi_watchdog (void *unused)
 	if (nmi_watchdog == NMI_LOCAL_APIC) {
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_AMD:
-			if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
+			if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
+			    boot_cpu_data.x86 != 16)
 				return;
 			if (!setup_k7_watchdog())
 				return;
@@ -956,6 +1013,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			dummy &= ~P4_CCCR_OVF;
 			wrmsrl(wd->cccr_msr, dummy);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* start the cycle over again */
+			write_watchdog_counter(wd->perfctr_msr, NULL);
 		}
 		else if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
 			 wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
@@ -964,9 +1023,12 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			 * other P6 variant.
 			 * ArchPerfom/Core Duo also needs this */
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* P6/ARCH_PERFMON has 32 bit counter write */
+			write_watchdog_counter32(wd->perfctr_msr, NULL);
+		} else {
+			/* start the cycle over again */
+			write_watchdog_counter(wd->perfctr_msr, NULL);
 		}
-		/* start the cycle over again */
-		write_watchdog_counter(wd->perfctr_msr, NULL);
 		rc = 1;
 	} else if (nmi_watchdog == NMI_IO_APIC) {
 		/* don't know how to accurately check for this.
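
The net effect of these last two hunks: the unconditional full-width reload that used to follow the if/else ladder is folded into each branch, so the P4 and default paths keep the 64-bit write_watchdog_counter() while the P6/ARCH_PERFMON path re-arms with the new 32-bit write. Schematically (paraphrasing the patched control flow, not quoting it):

	if (wd->perfctr_msr == MSR_P4_IQ_CCCR0) {
		/* clear OVF, re-arm LVTPC, then full 64-bit reload */
		write_watchdog_counter(wd->perfctr_msr, NULL);
	} else if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
		   wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
		write_watchdog_counter32(wd->perfctr_msr, NULL);	/* 32-bit */
	} else {
		write_watchdog_counter(wd->perfctr_msr, NULL);		/* e.g. K7 */
	}
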