Diffstat (limited to 'arch/x86/kernel/traps.c')
 arch/x86/kernel/traps.c | 93 +++++++++++++++++++----------------------
 1 file changed, 45 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 04d242ab0161..bde57f0f1616 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -20,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/string.h>
-#include <linux/unwind.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/kexec.h>
@@ -51,30 +50,22 @@
 #include <asm/debugreg.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/unwind.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
-#include <asm/pda.h>
 #else
 #include <asm/processor-flags.h>
 #include <asm/arch_hooks.h>
-#include <asm/nmi.h>
-#include <asm/smp.h>
-#include <asm/io.h>
 #include <asm/traps.h>
 
 #include "cpu/mcheck/mce.h"
 
-DECLARE_BITMAP(used_vectors, NR_VECTORS);
-EXPORT_SYMBOL_GPL(used_vectors);
-
 asmlinkage int system_call(void);
 
 /* Do we ignore FPU interrupts ? */
@@ -89,6 +80,9 @@ gate_desc idt_table[256]
 	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
 #endif
 
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+EXPORT_SYMBOL_GPL(used_vectors);
+
 static int ignore_nmis;
 
 static inline void conditional_sti(struct pt_regs *regs)
@@ -292,8 +286,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = 8;
 
-	/* This is always a kernel trap and never fixable (and thus must
-	   never return). */
+	/*
+	 * This is always a kernel trap and never fixable (and thus must
+	 * never return).
+	 */
 	for (;;)
 		die(str, regs, error_code);
 }
@@ -481,11 +477,7 @@ do_nmi(struct pt_regs *regs, long error_code)
 {
 	nmi_enter();
 
-#ifdef CONFIG_X86_32
-	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
-#else
-	add_pda(__nmi_count, 1);
-#endif
+	inc_irq_stat(__nmi_count);
 
 	if (!ignore_nmis)
 		default_do_nmi(regs);
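
Note: the hunk above folds the separate 32-bit nmi_count()/smp_processor_id() bookkeeping and the 64-bit add_pda() call into a single inc_irq_stat() invocation. As a rough userspace sketch of the idea (one macro hiding the per-CPU counter update), assuming a simple array-indexed-by-CPU layout and a stand-in for smp_processor_id(), this is not the kernel implementation:

/* Userspace sketch only: one macro hides the per-CPU counter update. */
#include <stdio.h>

#define NR_CPUS 4

struct irq_cpustat {
	unsigned int __nmi_count;
};

static struct irq_cpustat irq_stat[NR_CPUS];
static int current_cpu;	/* stand-in for smp_processor_id() */

#define inc_irq_stat(member)	(irq_stat[current_cpu].member++)

int main(void)
{
	current_cpu = 1;
	inc_irq_stat(__nmi_count);	/* what a handler like do_nmi() now does */
	inc_irq_stat(__nmi_count);
	printf("cpu1 nmi count: %u\n", irq_stat[1].__nmi_count);
	return 0;
}

The point of the abstraction is that callers such as do_nmi() no longer need #ifdef CONFIG_X86_32 blocks around statistics updates.
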
@@ -524,9 +516,11 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 }
 
 #ifdef CONFIG_X86_64
-/* Help handler running on IST stack to switch back to user stack
-   for scheduling or signal handling. The actual stack switch is done in
-   entry.S */
+/*
+ * Help handler running on IST stack to switch back to user stack
+ * for scheduling or signal handling. The actual stack switch is done in
+ * entry.S
+ */
 asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
@@ -536,8 +530,10 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 	/* Exception from user space */
 	else if (user_mode(eregs))
 		regs = task_pt_regs(current);
-	/* Exception from kernel and interrupts are enabled. Move to
-	   kernel process stack. */
+	/*
+	 * Exception from kernel and interrupts are enabled. Move to
+	 * kernel process stack.
+	 */
 	else if (eregs->flags & X86_EFLAGS_IF)
 		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
 	if (eregs != regs)
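
Note: the code in this hunk is unchanged apart from the comment style; sync_regs() still reserves room for a pt_regs copy just below the interrupted kernel stack pointer so the handler can continue on the normal process stack. A minimal userspace illustration of that pointer arithmetic, using a plain char array in place of the real IST/process stacks, might look like:

/* Userspace illustration of the stack switch; not kernel code. */
#include <stdio.h>
#include <string.h>

struct pt_regs {
	unsigned long sp;
	unsigned long flags;
};

int main(void)
{
	unsigned char stack[4096];	/* stand-in for the process stack */
	struct pt_regs eregs = { .sp = (unsigned long)(stack + sizeof(stack)) };
	struct pt_regs *regs;

	/* Reserve room below the saved stack pointer and copy the frame there. */
	eregs.sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *)eregs.sp;
	memcpy(regs, &eregs, sizeof(*regs));

	printf("exception frame copied to %p\n", (void *)regs);
	return 0;
}
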
@@ -664,7 +660,7 @@ void math_error(void __user *ip)
 {
 	struct task_struct *task;
 	siginfo_t info;
-	unsigned short cwd, swd;
+	unsigned short cwd, swd, err;
 
 	/*
 	 * Save the info for the exception handler and clear the error.
@@ -675,7 +671,6 @@ void math_error(void __user *ip)
 	task->thread.error_code = 0;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
-	info.si_code = __SI_FAULT;
 	info.si_addr = ip;
 	/*
 	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -689,34 +684,30 @@ void math_error(void __user *ip)
 	 */
 	cwd = get_fpu_cwd(task);
 	swd = get_fpu_swd(task);
-	switch (swd & ~cwd & 0x3f) {
-	case 0x000: /* No unmasked exception */
-#ifdef CONFIG_X86_32
-		return;
-#endif
-	default: /* Multiple exceptions */
-		break;
-	case 0x001: /* Invalid Op */
+
+	err = swd & ~cwd;
+
+	if (err & 0x001) {	/* Invalid op */
 		/*
 		 * swd & 0x240 == 0x040: Stack Underflow
 		 * swd & 0x240 == 0x240: Stack Overflow
 		 * User must clear the SF bit (0x40) if set
 		 */
 		info.si_code = FPE_FLTINV;
-		break;
-	case 0x002: /* Denormalize */
-	case 0x010: /* Underflow */
-		info.si_code = FPE_FLTUND;
-		break;
-	case 0x004: /* Zero Divide */
+	} else if (err & 0x004) { /* Divide by Zero */
 		info.si_code = FPE_FLTDIV;
-		break;
-	case 0x008: /* Overflow */
+	} else if (err & 0x008) { /* Overflow */
 		info.si_code = FPE_FLTOVF;
-		break;
-	case 0x020: /* Precision */
+	} else if (err & 0x012) { /* Denormal, Underflow */
+		info.si_code = FPE_FLTUND;
+	} else if (err & 0x020) { /* Precision */
 		info.si_code = FPE_FLTRES;
-		break;
+	} else {
+		/*
+		 * If we're using IRQ 13, or supposedly even some trap 16
+		 * implementations, it's possible we get a spurious trap...
+		 */
+		return;	/* Spurious trap, no error */
 	}
 	force_sig_info(SIGFPE, &info, task);
 }
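
Note: the rewrite above turns the switch on (swd & ~cwd & 0x3f) into an if/else-if chain over err = swd & ~cwd, checked in priority order (invalid op, divide by zero, overflow, denormal/underflow, precision), with anything else treated as a spurious trap. A standalone sketch of just that decode, with a hypothetical decode_fpu_error() helper and illustrative test values, could read:

/* Sketch of the new math_error() decode; constants mirror the x87 status bits. */
#include <stdio.h>

enum fpe_code { FPE_NONE, FPE_FLTINV, FPE_FLTDIV, FPE_FLTOVF, FPE_FLTUND, FPE_FLTRES };

static enum fpe_code decode_fpu_error(unsigned short cwd, unsigned short swd)
{
	unsigned short err = swd & ~cwd;	/* only the unmasked exceptions */

	if (err & 0x001)		/* Invalid op */
		return FPE_FLTINV;
	else if (err & 0x004)		/* Divide by Zero */
		return FPE_FLTDIV;
	else if (err & 0x008)		/* Overflow */
		return FPE_FLTOVF;
	else if (err & 0x012)		/* Denormal, Underflow */
		return FPE_FLTUND;
	else if (err & 0x020)		/* Precision */
		return FPE_FLTRES;
	return FPE_NONE;		/* Spurious trap, no error */
}

int main(void)
{
	/* Zero-divide bit unmasked: expect FPE_FLTDIV (2). */
	printf("%d\n", decode_fpu_error(0x0000, 0x0004));
	/* Everything masked in cwd: expect FPE_NONE (0). */
	printf("%d\n", decode_fpu_error(0x003f, 0x0004));
	return 0;
}

Checking the bits in a fixed order means a status word with several unmasked exceptions reports the highest-priority one, rather than falling into the old "multiple exceptions" default case.
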
@@ -904,7 +895,7 @@ asmlinkage void math_state_restore(void)
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-asmlinkage void math_emulate(long arg)
+void math_emulate(struct math_emu_info *info)
 {
 	printk(KERN_EMERG
 		"math-emulation not enabled and no coprocessor found.\n");
@@ -915,12 +906,16 @@ asmlinkage void math_emulate(long arg)
 #endif /* CONFIG_MATH_EMULATION */
 
 dotraplinkage void __kprobes
-do_device_not_available(struct pt_regs *regs, long error)
+do_device_not_available(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
+		struct math_emu_info info = { };
+
 		conditional_sti(regs);
-		math_emulate(0);
+
+		info.regs = regs;
+		math_emulate(&info);
 	} else {
 		math_state_restore(); /* interrupts still off */
 		conditional_sti(regs);
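
Note: with this hunk math_emulate() takes a struct math_emu_info carrying the trapping registers instead of an unused long argument, and do_device_not_available() fills in info.regs before calling it. A reduced userspace sketch of that calling convention, with the struct cut down to the single field used here purely for illustration:

/* Illustrative only: the real struct math_emu_info has more fields. */
#include <stdio.h>

struct pt_regs { unsigned long ip; };

struct math_emu_info {
	struct pt_regs *regs;
};

static void math_emulate(struct math_emu_info *info)
{
	printf("emulating FPU insn at ip=%#lx\n", info->regs->ip);
}

static void handle_no_fpu(struct pt_regs *regs)
{
	struct math_emu_info info = { };

	info.regs = regs;	/* mirrors the new do_device_not_available() */
	math_emulate(&info);
}

int main(void)
{
	struct pt_regs regs = { .ip = 0x1000 };

	handle_no_fpu(&regs);
	return 0;
}
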
@@ -949,9 +944,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
-#ifdef CONFIG_X86_32
 	int i;
-#endif
 
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -1008,11 +1001,15 @@ void __init trap_init(void)
 	}
 
 	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
+#endif
 
 	/* Reserve all the builtin and the syscall vector: */
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 		set_bit(i, used_vectors);
 
+#ifdef CONFIG_X86_64
+	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
+#else
 	set_bit(SYSCALL_VECTOR, used_vectors);
+#endif
 	/*
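
Note: after this change both 32-bit and 64-bit builds reserve their system-call vector in the used_vectors bitmap, alongside the built-in exception vectors below FIRST_EXTERNAL_VECTOR, so dynamic vector allocation will not hand them out. A small userspace model of that reservation, with plain bit operations standing in for DECLARE_BITMAP()/set_bit() (the vector numbers are the usual x86 values; the harness is illustrative):

/* Userspace model of trap_init()'s vector reservation; not kernel code. */
#include <stdio.h>

#define NR_VECTORS		256
#define FIRST_EXTERNAL_VECTOR	0x20
#define IA32_SYSCALL_VECTOR	0x80	/* same value as SYSCALL_VECTOR on 32-bit */

static unsigned long used_vectors[NR_VECTORS / (8 * sizeof(unsigned long))];

static void set_vector_bit(int nr)
{
	used_vectors[nr / (8 * sizeof(unsigned long))] |=
		1UL << (nr % (8 * sizeof(unsigned long)));
}

int main(void)
{
	int i;

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_vector_bit(i);
	set_vector_bit(IA32_SYSCALL_VECTOR);

	printf("reserved vectors 0-%#x and %#x\n",
	       FIRST_EXTERNAL_VECTOR - 1, IA32_SYSCALL_VECTOR);
	return 0;
}
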