Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--  arch/x86/kernel/traps.c  177
1 file changed, 50 insertions, 127 deletions
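
The patch below removes die_if_kernel(), kernel_math_error() and simd_math_error() and folds their work into a single math_error(regs, error_code, trapnr): trap 16 (#MF, x87) decodes the FPU control/status word pair, trap 19 (#XM, SIMD) decodes MXCSR, and both do_coprocessor_error() and do_simd_coprocessor_error() become one-line calls. For reference, a standalone userspace sketch of the two decode expressions used in the new function (illustrative only, not kernel code: the kernel reads the values through get_fpu_cwd()/get_fpu_swd()/get_fpu_mxcsr(), and the helper names and sample register values here are made up):

#include <stdio.h>

/*
 * x87 (#MF): unmasked exceptions are the status-word flags whose mask bit
 * is clear in the control word; only the low 0x3f bits are meaningful.
 */
static unsigned short x87_err(unsigned short cwd, unsigned short swd)
{
	return swd & ~cwd;
}

/*
 * SIMD (#XM): MXCSR carries both the mask bits (0x1f80) and the exception
 * flags (0x3f), so shift the masks down by 7 over the flags.  As in the
 * patched kernel code, bits above 0x3f may stay set and are simply ignored.
 */
static unsigned short simd_err(unsigned int mxcsr)
{
	return ~(mxcsr >> 7) & mxcsr;
}

int main(void)
{
	/* Divide-by-zero flag set, divide-by-zero exception unmasked. */
	printf("x87  err = %#x\n", x87_err(0x037b, 0x0004) & 0x3f);
	printf("simd err = %#x\n", simd_err(0x1d84) & 0x3f);
	return 0;
}

Both calls print 0x4; math_error() then turns that bit into FPE_FLTDIV in the "if (err & 0x001) ..." tail of the function, which this patch leaves unchanged.
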
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1168e4454188..02cfb9b8f5b1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,15 +108,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	dec_preempt_count();
 }
 
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
-	if (!user_mode_vm(regs))
-		die(str, regs, err);
-}
-#endif
-
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
@@ -543,11 +534,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 
 	/* DR6 may or may not be cleared by the CPU */
 	set_debugreg(0, 6);
+
 	/*
 	 * The processor cleared BTF, so don't mark that we need it set.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-	tsk->thread.debugctlmsr = 0;
+	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
 
 	/* Store the virtualized DR6 value */
 	tsk->thread.debugreg6 = dr6;
@@ -585,55 +576,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	return;
 }
 
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
-	if (fixup_exception(regs))
-		return 1;
-
-	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-	/* Illegal floating point operation in the kernel */
-	current->thread.trap_no = trapnr;
-	die(str, regs, 0);
-	return 0;
-}
-#endif
-
 /*
  * Note that we play around with the 'TS' bit in an attempt to get
  * the correct behaviour even in the presence of the asynchronous
  * IRQ13 behaviour
  */
-void math_error(void __user *ip)
+void math_error(struct pt_regs *regs, int error_code, int trapnr)
 {
-	struct task_struct *task;
+	struct task_struct *task = current;
 	siginfo_t info;
-	unsigned short cwd, swd, err;
+	unsigned short err;
+	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+		return;
+	conditional_sti(regs);
+
+	if (!user_mode_vm(regs))
+	{
+		if (!fixup_exception(regs)) {
+			task->thread.error_code = error_code;
+			task->thread.trap_no = trapnr;
+			die(str, regs, error_code);
+		}
+		return;
+	}
 
 	/*
 	 * Save the info for the exception handler and clear the error.
 	 */
-	task = current;
 	save_init_fpu(task);
-	task->thread.trap_no = 16;
-	task->thread.error_code = 0;
+	task->thread.trap_no = trapnr;
+	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
-	info.si_addr = ip;
-	/*
-	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-	 * status. 0x3f is the exception bits in these regs, 0x200 is the
-	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-	 * fault bit. We should only be taking one exception at a time,
-	 * so if this combination doesn't produce any single exception,
-	 * then we have a bad program that isn't synchronizing its FPU usage
-	 * and it will suffer the consequences since we won't be able to
-	 * fully reproduce the context of the exception
-	 */
-	cwd = get_fpu_cwd(task);
-	swd = get_fpu_swd(task);
+	info.si_addr = (void __user *)regs->ip;
+	if (trapnr == 16) {
+		unsigned short cwd, swd;
+		/*
+		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
+		 * status. 0x3f is the exception bits in these regs, 0x200 is the
+		 * C1 reg you need in case of a stack fault, 0x040 is the stack
+		 * fault bit. We should only be taking one exception at a time,
+		 * so if this combination doesn't produce any single exception,
+		 * then we have a bad program that isn't synchronizing its FPU usage
+		 * and it will suffer the consequences since we won't be able to
+		 * fully reproduce the context of the exception
+		 */
+		cwd = get_fpu_cwd(task);
+		swd = get_fpu_swd(task);
 
-	err = swd & ~cwd;
+		err = swd & ~cwd;
+	} else {
+		/*
+		 * The SIMD FPU exceptions are handled a little differently, as there
+		 * is only a single status/control register. Thus, to determine which
+		 * unmasked exception was caught we must mask the exception mask bits
+		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+		 */
+		unsigned short mxcsr = get_fpu_mxcsr(task);
+		err = ~(mxcsr >> 7) & mxcsr;
+	}
 
 	if (err & 0x001) {	/* Invalid op */
 		/*
@@ -662,97 +665,17 @@ void math_error(void __user *ip)
 
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	conditional_sti(regs);
-
 #ifdef CONFIG_X86_32
 	ignore_fpu_irq = 1;
-#else
-	if (!user_mode(regs) &&
-	    kernel_math_error(regs, "kernel x87 math error", 16))
-		return;
 #endif
 
-	math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
-	struct task_struct *task;
-	siginfo_t info;
-	unsigned short mxcsr;
-
-	/*
-	 * Save the info for the exception handler and clear the error.
-	 */
-	task = current;
-	save_init_fpu(task);
-	task->thread.trap_no = 19;
-	task->thread.error_code = 0;
-	info.si_signo = SIGFPE;
-	info.si_errno = 0;
-	info.si_code = __SI_FAULT;
-	info.si_addr = ip;
-	/*
-	 * The SIMD FPU exceptions are handled a little differently, as there
-	 * is only a single status/control register. Thus, to determine which
-	 * unmasked exception was caught we must mask the exception mask bits
-	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-	 */
-	mxcsr = get_fpu_mxcsr(task);
-	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-	case 0x000:
-	default:
-		break;
-	case 0x001: /* Invalid Op */
-		info.si_code = FPE_FLTINV;
-		break;
-	case 0x002: /* Denormalize */
-	case 0x010: /* Underflow */
-		info.si_code = FPE_FLTUND;
-		break;
-	case 0x004: /* Zero Divide */
-		info.si_code = FPE_FLTDIV;
-		break;
-	case 0x008: /* Overflow */
-		info.si_code = FPE_FLTOVF;
-		break;
-	case 0x020: /* Precision */
-		info.si_code = FPE_FLTRES;
-		break;
-	}
-	force_sig_info(SIGFPE, &info, task);
+	math_error(regs, error_code, 16);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
-	if (cpu_has_xmm) {
-		/* Handle SIMD FPU exceptions on PIII+ processors. */
-		ignore_fpu_irq = 1;
-		simd_math_error((void __user *)regs->ip);
-		return;
-	}
-	/*
-	 * Handle strange cache flush from user space exception
-	 * in all other cases. This is undocumented behaviour.
-	 */
-	if (regs->flags & X86_VM_MASK) {
-		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
-		return;
-	}
-	current->thread.trap_no = 19;
-	current->thread.error_code = error_code;
-	die_if_kernel("cache flush denied", regs, error_code);
-	force_sig(SIGSEGV, current);
-#else
-	if (!user_mode(regs) &&
-	    kernel_math_error(regs, "kernel simd math error", 19))
-		return;
-	simd_math_error((void __user *)regs->ip);
-#endif
+	math_error(regs, error_code, 19);
 }
 
 dotraplinkage void
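
For completeness, the err-to-si_code mapping that the unchanged tail of math_error() applies (and that the deleted simd_math_error() above expressed as a switch) can be restated as a small userspace table; sigfpe_code() is an illustrative stand-in, not a kernel function, and the 0 fallback plays the role of the kernel's __SI_FAULT default:

#include <signal.h>
#include <stdio.h>

static int sigfpe_code(unsigned short err)
{
	switch (err & 0x3f) {
	case 0x001:		/* Invalid op */
		return FPE_FLTINV;
	case 0x002:		/* Denormal */
	case 0x010:		/* Underflow */
		return FPE_FLTUND;
	case 0x004:		/* Zero divide */
		return FPE_FLTDIV;
	case 0x008:		/* Overflow */
		return FPE_FLTOVF;
	case 0x020:		/* Precision */
		return FPE_FLTRES;
	default:		/* no single bit set: kernel keeps __SI_FAULT */
		return 0;
	}
}

int main(void)
{
	printf("err 0x004 -> si_code %d (FPE_FLTDIV = %d)\n",
	       sigfpe_code(0x004), FPE_FLTDIV);
	return 0;
}
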