Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--  arch/x86/kernel/traps.c | 167
1 files changed, 71 insertions, 96 deletions
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 60788dee0f8a..b9b67166f9de 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -83,6 +83,13 @@ EXPORT_SYMBOL_GPL(used_vectors);
 
 static int ignore_nmis;
 
+int unknown_nmi_panic;
+/*
+ * Prevent NMI reason port (0x61) being accessed simultaneously, can
+ * only be used in NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
+
 static inline void conditional_sti(struct pt_regs *regs)
 {
         if (regs->flags & X86_EFLAGS_IF)
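Editor's note, for orientation only: the comment added above refers to the legacy "NMI reason" register, which on a standard PC is I/O port 0x61 (System Control Port B), so the get_nmi_reason() call used further down in this patch amounts to a single port read. The sketch below is an assumption for illustration, not the kernel's actual helper; example_get_nmi_reason() is a made-up name.

#include <asm/io.h>

/* Sketch (assumption): reading the NMI reason byte on a legacy PC. */
static inline unsigned char example_get_nmi_reason(void)
{
        return inb(0x61);       /* System Control Port B */
}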
@@ -300,16 +307,23 @@ gp_in_kernel:
         die("general protection fault", regs, error_code);
 }
 
-static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+static int __init setup_unknown_nmi_panic(char *str)
 {
-        printk(KERN_EMERG
-                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-                        reason, smp_processor_id());
+        unknown_nmi_panic = 1;
+        return 1;
+}
+__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
-        printk(KERN_EMERG
-                "You have some hardware problem, likely on the PCI bus.\n");
+static notrace __kprobes void
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
+{
+        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+                 reason, smp_processor_id());
 
+        /*
+         * On some machines, PCI SERR line is used to report memory
+         * errors. EDAC makes use of it.
+         */
 #if defined(CONFIG_EDAC)
         if (edac_handler_set()) {
                 edac_atomic_assert_error();
@@ -320,11 +334,11 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
         if (panic_on_unrecovered_nmi)
                 panic("NMI: Not continuing");
 
-        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+        pr_emerg("Dazed and confused, but trying to continue\n");
 
-        /* Clear and disable the memory parity error line. */
-        reason = (reason & 0xf) | 4;
-        outb(reason, 0x61);
+        /* Clear and disable the PCI SERR error line. */
+        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+        outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -332,22 +346,26 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 {
         unsigned long i;
 
-        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+        pr_emerg(
+        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+                 reason, smp_processor_id());
         show_registers(regs);
 
         if (panic_on_io_nmi)
                 panic("NMI IOCK error: Not continuing");
 
         /* Re-enable the IOCK line, wait for a few seconds */
-        reason = (reason & 0xf) | 8;
-        outb(reason, 0x61);
+        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+        outb(reason, NMI_REASON_PORT);
 
-        i = 2000;
-        while (--i)
-                udelay(1000);
+        i = 20000;
+        while (--i) {
+                touch_nmi_watchdog();
+                udelay(100);
+        }
 
-        reason &= ~8;
-        outb(reason, 0x61);
+        reason &= ~NMI_REASON_CLEAR_IOCHK;
+        outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
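Editor's note: the rewritten pci_serr_error() and io_check_error() use NMI_REASON_* symbols that are defined outside this file. Their values can be read directly off the magic numbers they replace in this diff; the block below is a reconstruction for reference only (the real definitions are assumed to live in an x86 header such as asm/mach_traps.h) and is not part of the patch.

/* Reconstructed from the replaced magic numbers; names as used above. */
#define NMI_REASON_PORT         0x61    /* was outb(reason, 0x61) */

#define NMI_REASON_SERR         0x80    /* was (reason & 0x80) */
#define NMI_REASON_IOCHK        0x40    /* was (reason & 0x40) */
#define NMI_REASON_MASK         (NMI_REASON_SERR | NMI_REASON_IOCHK)

#define NMI_REASON_CLEAR_SERR   0x04    /* was ... | 4 */
#define NMI_REASON_CLEAR_IOCHK  0x08    /* was ... | 8 */
#define NMI_REASON_CLEAR_MASK   0x0f    /* was (reason & 0xf) */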
@@ -366,69 +384,50 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
                 return;
         }
 #endif
-        printk(KERN_EMERG
-                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-                        reason, smp_processor_id());
+        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+                 reason, smp_processor_id());
 
-        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
-        if (panic_on_unrecovered_nmi)
+        pr_emerg("Do you have a strange power saving mode enabled?\n");
+        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                 panic("NMI: Not continuing");
 
-        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+        pr_emerg("Dazed and confused, but trying to continue\n");
 }
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
         unsigned char reason = 0;
-        int cpu;
 
-        cpu = smp_processor_id();
-
-        /* Only the BSP gets external NMIs from the system. */
-        if (!cpu)
-                reason = get_nmi_reason();
-
-        if (!(reason & 0xc0)) {
-                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-                                                                == NOTIFY_STOP)
-                        return;
+        /*
+         * CPU-specific NMI must be processed before non-CPU-specific
+         * NMI, otherwise we may lose it, because the CPU-specific
+         * NMI can not be detected/processed on other CPUs.
+         */
+        if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+                return;
 
-#ifdef CONFIG_X86_LOCAL_APIC
-                if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-                                                        == NOTIFY_STOP)
-                        return;
+        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+        raw_spin_lock(&nmi_reason_lock);
+        reason = get_nmi_reason();
 
-#ifndef CONFIG_LOCKUP_DETECTOR
+        if (reason & NMI_REASON_MASK) {
+                if (reason & NMI_REASON_SERR)
+                        pci_serr_error(reason, regs);
+                else if (reason & NMI_REASON_IOCHK)
+                        io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
                 /*
-                 * Ok, so this is none of the documented NMI sources,
-                 * so it must be the NMI watchdog.
+                 * Reassert NMI in case it became active
+                 * meanwhile as it's edge-triggered:
                  */
-                if (nmi_watchdog_tick(regs, reason))
-                        return;
-                if (!do_nmi_callback(regs, cpu))
-#endif /* !CONFIG_LOCKUP_DETECTOR */
-                        unknown_nmi_error(reason, regs);
-#else
-                        unknown_nmi_error(reason, regs);
-#endif
+                reassert_nmi();
 #endif
-
+                raw_spin_unlock(&nmi_reason_lock);
                 return;
         }
-        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-                return;
+        raw_spin_unlock(&nmi_reason_lock);
 
-        /* AK: following checks seem to be broken on modern chipsets. FIXME */
-        if (reason & 0x80)
-                mem_parity_error(reason, regs);
-        if (reason & 0x40)
-                io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-        /*
-         * Reassert NMI in case it became active meanwhile
-         * as it's edge-triggered:
-         */
-        reassert_nmi();
-#endif
+        unknown_nmi_error(reason, regs);
 }
 
 dotraplinkage notrace __kprobes void
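Editor's note: with this hunk, default_do_nmi() runs the DIE_NMI notifier chain before touching the reason port, and DIE_NMI_IPI is gone, so CPU-specific NMI users (such as perf or kgdb) are expected to claim their NMIs from a DIE_NMI handler and return NOTIFY_STOP. Below is a hypothetical consumer sketch, assuming the die-notifier API of this kernel generation (register_die_notifier(), struct die_args); example_nmi_notify(), example_nmi_nb, example_nmi_init() and example_device_raised_nmi() are made-up names, not part of the patch.

#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/types.h>

/* Placeholder check; real code would poll its device's status register. */
static bool example_device_raised_nmi(struct pt_regs *regs)
{
        return false;
}

static int example_nmi_notify(struct notifier_block *nb,
                              unsigned long cmd, void *data)
{
        struct die_args *args = data;

        if (cmd != DIE_NMI)
                return NOTIFY_DONE;

        /* Claim the NMI so default_do_nmi() stops processing it. */
        if (example_device_raised_nmi(args->regs))
                return NOTIFY_STOP;

        return NOTIFY_DONE;
}

static struct notifier_block example_nmi_nb = {
        .notifier_call = example_nmi_notify,
};

static int __init example_nmi_init(void)
{
        return register_die_notifier(&example_nmi_nb);
}
device_initcall(example_nmi_init);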
@@ -446,14 +445,12 @@ do_nmi(struct pt_regs *regs, long error_code)
 
 void stop_nmi(void)
 {
-        acpi_nmi_disable();
         ignore_nmis++;
 }
 
 void restart_nmi(void)
 {
         ignore_nmis--;
-        acpi_nmi_enable();
 }
 
 /* May run on IST stack. */
@@ -575,6 +572,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
         if (regs->flags & X86_VM_MASK) {
                 handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                 error_code, 1);
+                preempt_conditional_cli(regs);
                 return;
         }
 
@@ -776,21 +774,10 @@ asmlinkage void math_state_restore(void)
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
-#ifndef CONFIG_MATH_EMULATION
-void math_emulate(struct math_emu_info *info)
-{
-        printk(KERN_EMERG
-                "math-emulation not enabled and no coprocessor found.\n");
-        printk(KERN_EMERG "killing %s.\n", current->comm);
-        force_sig(SIGFPE, current);
-        schedule();
-}
-#endif /* CONFIG_MATH_EMULATION */
-
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_MATH_EMULATION
         if (read_cr0() & X86_CR0_EM) {
                 struct math_emu_info info = { };
 
@@ -798,12 +785,12 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 
                 info.regs = regs;
                 math_emulate(&info);
-        } else {
-                math_state_restore(); /* interrupts still off */
-                conditional_sti(regs);
+                return;
         }
-#else
-        math_state_restore();
+#endif
+        math_state_restore(); /* interrupts still off */
+#ifdef CONFIG_X86_32
+        conditional_sti(regs);
 #endif
 }
 
@@ -881,18 +868,6 @@ void __init trap_init(void)
 #endif
 
 #ifdef CONFIG_X86_32
-        if (cpu_has_fxsr) {
-                printk(KERN_INFO "Enabling fast FPU save and restore... ");
-                set_in_cr4(X86_CR4_OSFXSR);
-                printk("done.\n");
-        }
-        if (cpu_has_xmm) {
-                printk(KERN_INFO
-                        "Enabling unmasked SIMD FPU exception support... ");
-                set_in_cr4(X86_CR4_OSXMMEXCPT);
-                printk("done.\n");
-        }
-
         set_system_trap_gate(SYSCALL_VECTOR, &system_call);
         set_bit(SYSCALL_VECTOR, used_vectors);
 #endif