-rw-r--r--	arch/x86/kernel/traps_32.c	173
-rw-r--r--	arch/x86/kernel/traps_64.c	368
2 files changed, 536 insertions, 5 deletions
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 076739863d24..ffb131f74f78 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -48,7 +48,6 @@
 
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
-#include <asm/kmemcheck.h>
 #include <asm/debugreg.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
@@ -59,6 +58,11 @@
 
 #include <mach_traps.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/pgalloc.h>
+#include <asm/proto.h>
+#include <asm/pda.h>
+#else
 #include <asm/processor-flags.h>
 #include <asm/arch_hooks.h>
 #include <asm/nmi.h>
@@ -83,6 +87,7 @@ char ignore_fpu_irq;
  */
 gate_desc idt_table[256]
 	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+#endif
 
 static int ignore_nmis;
 
@@ -106,6 +111,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	dec_preempt_count();
 }
 
+#ifdef CONFIG_X86_32
 static inline void
 die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
@@ -153,6 +159,7 @@ static int lazy_iobitmap_copy(void)
 
 	return 0;
 }
+#endif
 
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
@@ -160,6 +167,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 {
 	struct task_struct *tsk = current;
 
+#ifdef CONFIG_X86_32
 	if (regs->flags & X86_VM_MASK) {
 		/*
 		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
@@ -169,11 +177,14 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 			goto vm86_trap;
 		goto trap_signal;
 	}
+#endif
 
 	if (!user_mode(regs))
 		goto kernel_trap;
 
+#ifdef CONFIG_X86_32
 trap_signal:
+#endif
 	/*
 	 * We want error_code and trap_no set for userspace faults and
 	 * kernelspace faults which result in die(), but not
@@ -186,6 +197,18 @@ trap_signal:
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = trapnr;
 
+#ifdef CONFIG_X86_64
+	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+	    printk_ratelimit()) {
+		printk(KERN_INFO
+		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+		       tsk->comm, tsk->pid, str,
+		       regs->ip, regs->sp, error_code);
+		print_vma_addr(" in ", regs->ip);
+		printk("\n");
+	}
+#endif
+
 	if (info)
 		force_sig_info(signr, info, tsk);
 	else
@@ -200,11 +223,13 @@ kernel_trap:
 	}
 	return;
 
+#ifdef CONFIG_X86_32
 vm86_trap:
 	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
 			error_code, trapnr))
 		goto trap_signal;
 	return;
+#endif
 }
 
 #define DO_ERROR(trapnr, signr, str, name) \
@@ -239,9 +264,41 @@ DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
 DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+#ifdef CONFIG_X86_32
 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+#endif
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 
+#ifdef CONFIG_X86_64
+/* Runs on IST stack */
+dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+			12, SIGBUS) == NOTIFY_STOP)
+		return;
+	preempt_conditional_sti(regs);
+	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	preempt_conditional_cli(regs);
+}
+
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+{
+	static const char str[] = "double fault";
+	struct task_struct *tsk = current;
+
+	/* Return not checked because double check cannot be ignored */
+	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+
+	tsk->thread.error_code = error_code;
+	tsk->thread.trap_no = 8;
+
+	/* This is always a kernel trap and never fixable (and thus must
+	   never return). */
+	for (;;)
+		die(str, regs, error_code);
+}
+#endif
+
 dotraplinkage void __kprobes
 do_general_protection(struct pt_regs *regs, long error_code)
 {
@@ -249,6 +306,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
 	conditional_sti(regs);
 
+#ifdef CONFIG_X86_32
 	if (lazy_iobitmap_copy()) {
 		/* restart the faulting instruction */
 		return;
@@ -256,6 +314,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
 	if (regs->flags & X86_VM_MASK)
 		goto gp_in_vm86;
+#endif
 
 	tsk = current;
 	if (!user_mode(regs))
@@ -277,10 +336,12 @@ do_general_protection(struct pt_regs *regs, long error_code)
 	force_sig(SIGSEGV, tsk);
 	return;
 
+#ifdef CONFIG_X86_32
 gp_in_vm86:
 	local_irq_enable();
 	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 	return;
+#endif
 
 gp_in_kernel:
 	if (fixup_exception(regs))
@@ -368,6 +429,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 }
 
+#ifdef CONFIG_X86_32
 static DEFINE_SPINLOCK(nmi_print_lock);
 
 void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
@@ -402,6 +464,7 @@ void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 
 	do_exit(SIGSEGV);
 }
+#endif
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
@@ -441,11 +504,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 		mem_parity_error(reason, regs);
 	if (reason & 0x40)
 		io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
 	/*
 	 * Reassert NMI in case it became active meanwhile
 	 * as it's edge-triggered:
 	 */
 	reassert_nmi();
+#endif
 }
 
 dotraplinkage notrace __kprobes void
@@ -453,7 +518,11 @@ do_nmi(struct pt_regs *regs, long error_code)
 {
 	nmi_enter();
 
+#ifdef CONFIG_X86_32
 	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
+#else
+	add_pda(__nmi_count, 1);
+#endif
 
 	if (!ignore_nmis)
 		default_do_nmi(regs);
@@ -491,6 +560,29 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 	preempt_conditional_cli(regs);
 }
 
+#ifdef CONFIG_X86_64
+/* Help handler running on IST stack to switch back to user stack
+   for scheduling or signal handling. The actual stack switch is done in
+   entry.S */
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+{
+	struct pt_regs *regs = eregs;
+	/* Did already sync */
+	if (eregs == (struct pt_regs *)eregs->sp)
+		;
+	/* Exception from user space */
+	else if (user_mode(eregs))
+		regs = task_pt_regs(current);
+	/* Exception from kernel and interrupts are enabled. Move to
+	   kernel process stack. */
+	else if (eregs->flags & X86_EFLAGS_IF)
+		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
+	if (eregs != regs)
+		*regs = *eregs;
+	return regs;
+}
+#endif
+
 /*
  * Our handling of the processor debug registers is non-trivial.
  * We do not clear them on entry and exit from the kernel. Therefore
@@ -542,8 +634,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 		goto clear_dr7;
 	}
 
+#ifdef CONFIG_X86_32
 	if (regs->flags & X86_VM_MASK)
 		goto debug_vm86;
+#endif
 
 	/* Save debug status register where ptrace can see it */
 	tsk->thread.debugreg6 = condition;
@@ -570,10 +664,12 @@ clear_dr7:
 	preempt_conditional_cli(regs);
 	return;
 
+#ifdef CONFIG_X86_32
 debug_vm86:
 	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
 	preempt_conditional_cli(regs);
 	return;
+#endif
 
 clear_TF_reenable:
 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
@@ -582,6 +678,20 @@ clear_TF_reenable:
 	return;
 }
 
+#ifdef CONFIG_X86_64
+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
+{
+	if (fixup_exception(regs))
+		return 1;
+
+	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
+	/* Illegal floating point operation in the kernel */
+	current->thread.trap_no = trapnr;
+	die(str, regs, 0);
+	return 0;
+}
+#endif
+
 /*
  * Note that we play around with the 'TS' bit in an attempt to get
  * the correct behaviour even in the presence of the asynchronous
@@ -618,7 +728,9 @@ void math_error(void __user *ip)
 	swd = get_fpu_swd(task);
 	switch (swd & ~cwd & 0x3f) {
 	case 0x000: /* No unmasked exception */
+#ifdef CONFIG_X86_32
 		return;
+#endif
 	default: /* Multiple exceptions */
 		break;
 	case 0x001: /* Invalid Op */
@@ -649,7 +761,15 @@ void math_error(void __user *ip)
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	conditional_sti(regs);
+
+#ifdef CONFIG_X86_32
 	ignore_fpu_irq = 1;
+#else
+	if (!user_mode(regs) &&
+	    kernel_math_error(regs, "kernel x87 math error", 16))
+		return;
+#endif
+
 	math_error((void __user *)regs->ip);
 }
 
@@ -706,6 +826,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	conditional_sti(regs);
 
+#ifdef CONFIG_X86_32
 	if (cpu_has_xmm) {
 		/* Handle SIMD FPU exceptions on PIII+ processors. */
 		ignore_fpu_irq = 1;
@@ -724,6 +845,12 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 	current->thread.error_code = error_code;
 	die_if_kernel("cache flush denied", regs, error_code);
 	force_sig(SIGSEGV, current);
+#else
+	if (!user_mode(regs) &&
+	    kernel_math_error(regs, "kernel simd math error", 19))
+		return;
+	simd_math_error((void __user *)regs->ip);
+#endif
 }
 
 dotraplinkage void
@@ -736,6 +863,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 {
 	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
@@ -754,6 +882,15 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 
 	return new_kesp;
 }
+#else
+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+{
+}
+
+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
+{
+}
+#endif
 
 /*
  * 'math_state_restore()' saves the current math information in the
@@ -786,14 +923,24 @@ asmlinkage void math_state_restore(void)
 	}
 
 	clts(); /* Allow maths ops (or we recurse) */
+#ifdef CONFIG_X86_32
 	restore_fpu(tsk);
+#else
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		stts();
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
+#endif
 	thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
 	tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-
 asmlinkage void math_emulate(long arg)
 {
 	printk(KERN_EMERG
@@ -802,12 +949,12 @@ asmlinkage void math_emulate(long arg)
 	force_sig(SIGFPE, current);
 	schedule();
 }
-
 #endif /* CONFIG_MATH_EMULATION */
 
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error)
 {
+#ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
 		conditional_sti(regs);
 		math_emulate(0);
@@ -815,8 +962,12 @@ do_device_not_available(struct pt_regs *regs, long error)
 		math_state_restore(); /* interrupts still off */
 		conditional_sti(regs);
 	}
+#else
+	math_state_restore();
+#endif
 }
 
+#ifdef CONFIG_X86_32
 #ifdef CONFIG_X86_MCE
 dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
 {
@@ -839,10 +990,13 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 		return;
 	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
 }
+#endif
 
 void __init trap_init(void)
 {
+#ifdef CONFIG_X86_32
 	int i;
+#endif
 
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -862,7 +1016,11 @@ void __init trap_init(void)
 	set_intr_gate(5, &bounds);
 	set_intr_gate(6, &invalid_op);
 	set_intr_gate(7, &device_not_available);
+#ifdef CONFIG_X86_32
 	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+#else
+	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+#endif
 	set_intr_gate(9, &coprocessor_segment_overrun);
 	set_intr_gate(10, &invalid_TSS);
 	set_intr_gate(11, &segment_not_present);
@@ -877,6 +1035,11 @@ void __init trap_init(void)
 #endif
 	set_intr_gate(19, &simd_coprocessor_error);
 
+#ifdef CONFIG_IA32_EMULATION
+	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+#endif
+
+#ifdef CONFIG_X86_32
 	if (cpu_has_fxsr) {
 		printk(KERN_INFO "Enabling fast FPU save and restore... ");
 		set_in_cr4(X86_CR4_OSFXSR);
@@ -896,11 +1059,13 @@ void __init trap_init(void)
 		set_bit(i, used_vectors);
 
 	set_bit(SYSCALL_VECTOR, used_vectors);
-
+#endif
 	/*
 	 * Should be a barrier for any external CPU state:
 	 */
 	cpu_init();
 
+#ifdef CONFIG_X86_32
 	trap_init_hook();
+#endif
 }
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 22fe62a24edb..60ecc855ab81 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -33,13 +33,21 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_EISA
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#endif
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
 #if defined(CONFIG_EDAC)
 #include <linux/edac.h>
 #endif
 
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
-#include <asm/kmemcheck.h>
 #include <asm/debugreg.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
@@ -50,10 +58,35 @@
 
 #include <mach_traps.h>
 
+#ifdef CONFIG_X86_64
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
 #include <asm/pda.h>
+#else
+#include <asm/processor-flags.h>
+#include <asm/arch_hooks.h>
+#include <asm/nmi.h>
+#include <asm/smp.h>
+#include <asm/io.h>
+
+#include "cpu/mcheck/mce.h"
 
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+EXPORT_SYMBOL_GPL(used_vectors);
+
+asmlinkage int system_call(void);
+
+/* Do we ignore FPU interrupts ? */
+char ignore_fpu_irq;
+
+/*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+gate_desc idt_table[256]
+	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+#endif
 
 static int ignore_nmis;
 
@@ -77,15 +110,80 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	dec_preempt_count();
 }
 
+#ifdef CONFIG_X86_32
+static inline void
+die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+	if (!user_mode_vm(regs))
+		die(str, regs, err);
+}
+
+/*
+ * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
+ * invalid offset set (the LAZY one) and the faulting thread has
+ * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
+ * we set the offset field correctly and return 1.
+ */
+static int lazy_iobitmap_copy(void)
+{
+	struct thread_struct *thread;
+	struct tss_struct *tss;
+	int cpu;
+
+	cpu = get_cpu();
+	tss = &per_cpu(init_tss, cpu);
+	thread = &current->thread;
+
+	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+	    thread->io_bitmap_ptr) {
+		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
+		       thread->io_bitmap_max);
+		/*
+		 * If the previously set map was extending to higher ports
+		 * than the current one, pad extra space with 0xff (no access).
+		 */
+		if (thread->io_bitmap_max < tss->io_bitmap_max) {
+			memset((char *) tss->io_bitmap +
+				thread->io_bitmap_max, 0xff,
+				tss->io_bitmap_max - thread->io_bitmap_max);
+		}
+		tss->io_bitmap_max = thread->io_bitmap_max;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->io_bitmap_owner = thread;
+		put_cpu();
+
+		return 1;
+	}
+	put_cpu();
+
+	return 0;
+}
+#endif
+
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
 {
 	struct task_struct *tsk = current;
 
+#ifdef CONFIG_X86_32
+	if (regs->flags & X86_VM_MASK) {
+		/*
+		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+		 * On nmi (interrupt 2), do_trap should not be called.
+		 */
+		if (trapnr < 6)
+			goto vm86_trap;
+		goto trap_signal;
+	}
+#endif
+
 	if (!user_mode(regs))
 		goto kernel_trap;
 
+#ifdef CONFIG_X86_32
+trap_signal:
+#endif
 	/*
 	 * We want error_code and trap_no set for userspace faults and
 	 * kernelspace faults which result in die(), but not
@@ -98,6 +196,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = trapnr;
 
+#ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 	    printk_ratelimit()) {
 		printk(KERN_INFO
@@ -107,6 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 		print_vma_addr(" in ", regs->ip);
 		printk("\n");
 	}
+#endif
 
 	if (info)
 		force_sig_info(signr, info, tsk);
@@ -121,6 +221,14 @@ kernel_trap:
 		die(str, regs, error_code);
 	}
 	return;
+
+#ifdef CONFIG_X86_32
+vm86_trap:
+	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
+			error_code, trapnr))
+		goto trap_signal;
+	return;
+#endif
 }
 
 #define DO_ERROR(trapnr, signr, str, name) \
@@ -155,8 +263,12 @@ DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
 DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+#ifdef CONFIG_X86_32
+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+#endif
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 
+#ifdef CONFIG_X86_64
 /* Runs on IST stack */
 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
 {
@@ -184,6 +296,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	for (;;)
 		die(str, regs, error_code);
 }
+#endif
 
 dotraplinkage void __kprobes
 do_general_protection(struct pt_regs *regs, long error_code)
@@ -192,6 +305,16 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
 	conditional_sti(regs);
 
+#ifdef CONFIG_X86_32
+	if (lazy_iobitmap_copy()) {
+		/* restart the faulting instruction */
+		return;
+	}
+
+	if (regs->flags & X86_VM_MASK)
+		goto gp_in_vm86;
+#endif
+
 	tsk = current;
 	if (!user_mode(regs))
 		goto gp_in_kernel;
@@ -212,6 +335,13 @@ do_general_protection(struct pt_regs *regs, long error_code)
 	force_sig(SIGSEGV, tsk);
 	return;
 
+#ifdef CONFIG_X86_32
+gp_in_vm86:
+	local_irq_enable();
+	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+	return;
+#endif
+
 gp_in_kernel:
 	if (fixup_exception(regs))
 		return;
@@ -277,6 +407,16 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
 			NOTIFY_STOP)
 		return;
+#ifdef CONFIG_MCA
+	/*
+	 * Might actually be able to figure out what the guilty party
+	 * is:
+	 */
+	if (MCA_bus) {
+		mca_handle_nmi();
+		return;
+	}
+#endif
 	printk(KERN_EMERG
 		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
 			reason, smp_processor_id());
@@ -288,6 +428,43 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 }
 
+#ifdef CONFIG_X86_32
+static DEFINE_SPINLOCK(nmi_print_lock);
+
+void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
+{
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+		return;
+
+	spin_lock(&nmi_print_lock);
+	/*
+	 * We are in trouble anyway, lets at least try
+	 * to get a message out:
+	 */
+	bust_spinlocks(1);
+	printk(KERN_EMERG "%s", str);
+	printk(" on CPU%d, ip %08lx, registers:\n",
+		smp_processor_id(), regs->ip);
+	show_registers(regs);
+	if (do_panic)
+		panic("Non maskable interrupt");
+	console_silent();
+	spin_unlock(&nmi_print_lock);
+	bust_spinlocks(0);
+
+	/*
+	 * If we are in kernel we are probably nested up pretty bad
+	 * and might aswell get out now while we still can:
+	 */
+	if (!user_mode_vm(regs)) {
+		current->thread.trap_no = 2;
+		crash_kexec(regs);
+	}
+
+	do_exit(SIGSEGV);
+}
+#endif
+
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
@@ -303,6 +480,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 	if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
 				== NOTIFY_STOP)
 		return;
+#ifdef CONFIG_X86_LOCAL_APIC
 	/*
 	 * Ok, so this is none of the documented NMI sources,
 	 * so it must be the NMI watchdog.
@@ -311,6 +489,9 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 			return;
 		if (!do_nmi_callback(regs, cpu))
 			unknown_nmi_error(reason, regs);
+#else
+		unknown_nmi_error(reason, regs);
+#endif
 
 		return;
 	}
@@ -322,6 +503,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 		mem_parity_error(reason, regs);
 	if (reason & 0x40)
 		io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+	/*
+	 * Reassert NMI in case it became active meanwhile
+	 * as it's edge-triggered:
+	 */
+	reassert_nmi();
+#endif
 }
 
 dotraplinkage notrace __kprobes void
@@ -329,7 +517,11 @@ do_nmi(struct pt_regs *regs, long error_code)
 {
 	nmi_enter();
 
+#ifdef CONFIG_X86_32
+	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
+#else
 	add_pda(__nmi_count, 1);
+#endif
 
 	if (!ignore_nmis)
 		default_do_nmi(regs);
@@ -352,15 +544,22 @@ void restart_nmi(void)
 /* May run on IST stack. */
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
+#ifdef CONFIG_KPROBES
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
 			== NOTIFY_STOP)
 		return;
+#else
+	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
+			== NOTIFY_STOP)
+		return;
+#endif
 
 	preempt_conditional_sti(regs);
 	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
 	preempt_conditional_cli(regs);
 }
 
+#ifdef CONFIG_X86_64
 /* Help handler running on IST stack to switch back to user stack
    for scheduling or signal handling. The actual stack switch is done in
    entry.S */
@@ -381,6 +580,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 		*regs = *eregs;
 	return regs;
 }
+#endif
 
 /*
  * Our handling of the processor debug registers is non-trivial.
@@ -433,6 +633,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 		goto clear_dr7;
 	}
 
+#ifdef CONFIG_X86_32
+	if (regs->flags & X86_VM_MASK)
+		goto debug_vm86;
+#endif
+
 	/* Save debug status register where ptrace can see it */
 	tsk->thread.debugreg6 = condition;
 
@@ -458,6 +663,13 @@ clear_dr7:
 	preempt_conditional_cli(regs);
 	return;
 
+#ifdef CONFIG_X86_32
+debug_vm86:
+	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+	preempt_conditional_cli(regs);
+	return;
+#endif
+
 clear_TF_reenable:
 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 	regs->flags &= ~X86_EFLAGS_TF;
@@ -465,6 +677,7 @@ clear_TF_reenable:
 	return;
 }
 
+#ifdef CONFIG_X86_64
 static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
 {
 	if (fixup_exception(regs))
@@ -476,6 +689,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
 	die(str, regs, 0);
 	return 0;
 }
+#endif
 
 /*
  * Note that we play around with the 'TS' bit in an attempt to get
@@ -513,6 +727,9 @@ void math_error(void __user *ip)
 	swd = get_fpu_swd(task);
 	switch (swd & ~cwd & 0x3f) {
 	case 0x000: /* No unmasked exception */
+#ifdef CONFIG_X86_32
+		return;
+#endif
 	default: /* Multiple exceptions */
 		break;
 	case 0x001: /* Invalid Op */
@@ -543,9 +760,15 @@ void math_error(void __user *ip)
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	conditional_sti(regs);
+
+#ifdef CONFIG_X86_32
+	ignore_fpu_irq = 1;
+#else
 	if (!user_mode(regs) &&
 	    kernel_math_error(regs, "kernel x87 math error", 16))
 		return;
+#endif
+
 	math_error((void __user *)regs->ip);
 }
 
@@ -601,17 +824,64 @@ dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
 	conditional_sti(regs);
+
+#ifdef CONFIG_X86_32
+	if (cpu_has_xmm) {
+		/* Handle SIMD FPU exceptions on PIII+ processors. */
+		ignore_fpu_irq = 1;
+		simd_math_error((void __user *)regs->ip);
+		return;
+	}
+	/*
+	 * Handle strange cache flush from user space exception
+	 * in all other cases. This is undocumented behaviour.
+	 */
+	if (regs->flags & X86_VM_MASK) {
+		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
+		return;
+	}
+	current->thread.trap_no = 19;
+	current->thread.error_code = error_code;
+	die_if_kernel("cache flush denied", regs, error_code);
+	force_sig(SIGSEGV, current);
+#else
 	if (!user_mode(regs) &&
 	    kernel_math_error(regs, "kernel simd math error", 19))
 		return;
 	simd_math_error((void __user *)regs->ip);
+#endif
 }
 
 dotraplinkage void
 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 {
+	conditional_sti(regs);
+#if 0
+	/* No need to warn about this any longer. */
+	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
+#endif
 }
 
+#ifdef CONFIG_X86_32
+unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
+	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+	unsigned long new_kesp = kesp - base;
+	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
+
+	/* Set up base for espfix segment */
+	desc &= 0x00f0ff0000000000ULL;
+	desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+		((((__u64)base) << 32) & 0xff00000000000000ULL) |
+		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+		(lim_pages & 0xffff);
+	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
+
+	return new_kesp;
+}
+#else
 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
@@ -619,6 +889,7 @@ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
 {
 }
+#endif
 
 /*
  * 'math_state_restore()' saves the current math information in the
@@ -626,6 +897,9 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
  */
 asmlinkage void math_state_restore(void)
 {
@@ -648,6 +922,9 @@ asmlinkage void math_state_restore(void)
 	}
 
 	clts(); /* Allow maths ops (or we recurse) */
+#ifdef CONFIG_X86_32
+	restore_fpu(tsk);
+#else
 	/*
 	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
 	 */
@@ -656,19 +933,78 @@ asmlinkage void math_state_restore(void)
 		force_sig(SIGSEGV, tsk);
 		return;
 	}
+#endif
 	thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
 	tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
+#ifndef CONFIG_MATH_EMULATION
+asmlinkage void math_emulate(long arg)
+{
+	printk(KERN_EMERG
+		"math-emulation not enabled and no coprocessor found.\n");
+	printk(KERN_EMERG "killing %s.\n", current->comm);
+	force_sig(SIGFPE, current);
+	schedule();
+}
+#endif /* CONFIG_MATH_EMULATION */
+
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error)
 {
+#ifdef CONFIG_X86_32
+	if (read_cr0() & X86_CR0_EM) {
+		conditional_sti(regs);
+		math_emulate(0);
+	} else {
+		math_state_restore(); /* interrupts still off */
+		conditional_sti(regs);
+	}
+#else
 	math_state_restore();
+#endif
+}
+
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_MCE
+dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
+{
+	conditional_sti(regs);
+	machine_check_vector(regs, error);
 }
+#endif
+
+dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+{
+	siginfo_t info;
+	local_irq_enable();
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_BADSTK;
+	info.si_addr = 0;
+	if (notify_die(DIE_TRAP, "iret exception",
+			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+		return;
+	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+}
+#endif
 
 void __init trap_init(void)
 {
+#ifdef CONFIG_X86_32
+	int i;
+#endif
+
+#ifdef CONFIG_EISA
+	void __iomem *p = early_ioremap(0x0FFFD9, 4);
+
+	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+		EISA_bus = 1;
+	early_iounmap(p, 4);
+#endif
+
 	set_intr_gate(0, &divide_error);
 	set_intr_gate_ist(1, &debug, DEBUG_STACK);
 	set_intr_gate_ist(2, &nmi, NMI_STACK);
@@ -679,7 +1015,11 @@ void __init trap_init(void)
 	set_intr_gate(5, &bounds);
 	set_intr_gate(6, &invalid_op);
 	set_intr_gate(7, &device_not_available);
+#ifdef CONFIG_X86_32
+	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+#else
 	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+#endif
 	set_intr_gate(9, &coprocessor_segment_overrun);
 	set_intr_gate(10, &invalid_TSS);
 	set_intr_gate(11, &segment_not_present);
@@ -697,8 +1037,34 @@ void __init trap_init(void)
 #ifdef CONFIG_IA32_EMULATION
 	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
 #endif
+
+#ifdef CONFIG_X86_32
+	if (cpu_has_fxsr) {
+		printk(KERN_INFO "Enabling fast FPU save and restore... ");
+		set_in_cr4(X86_CR4_OSFXSR);
+		printk("done.\n");
+	}
+	if (cpu_has_xmm) {
+		printk(KERN_INFO
+			"Enabling unmasked SIMD FPU exception support... ");
+		set_in_cr4(X86_CR4_OSXMMEXCPT);
+		printk("done.\n");
+	}
+
+	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
+
+	/* Reserve all the builtin and the syscall vector: */
+	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+		set_bit(i, used_vectors);
+
+	set_bit(SYSCALL_VECTOR, used_vectors);
+#endif
 	/*
 	 * Should be a barrier for any external CPU state:
 	 */
 	cpu_init();
+
+#ifdef CONFIG_X86_32
+	trap_init_hook();
+#endif
 }