author    Alexander van Heukelum <heukelum@fastmail.fm>  2008-10-03 16:00:39 -0400
committer Ingo Molnar <mingo@elte.hu>                    2008-10-13 04:33:28 -0400
commit    081f75bbdc86de53537e1b5aca01de72bd2fea6b
tree      739b7954495fb63559b0184b76cc32c63aa2bdcd
parent    c1d518c8422ff7d3f377958771b265753028579c
traps: x86: make traps_32.c and traps_64.c equal
Use CONFIG_X86_64/CONFIG_X86_32 to conditionally compile the parts
needed for x86_64 or i386 only.

Tested: runs a small userspace for a number of minimal configurations
and boots the defconfigs.

Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/traps_32.c')
 arch/x86/kernel/traps_32.c | 173 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 169 insertions(+), 4 deletions(-)
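The whole patch is one mechanical pattern applied over and over: code shared by
both architectures stays at the top level, while i386-only and x86_64-only
pieces are wrapped in #ifdef CONFIG_X86_32 / #else / #endif, so a single source
file can be compiled for either build. A minimal standalone sketch of that
pattern follows (hypothetical file name and printf bodies, not kernel code; the
two branches stand in for the real per-arch NMI accounting seen in the do_nmi()
hunk below):

/*
 * pattern.c - illustration only. Build as either variant:
 *   gcc -DCONFIG_X86_32 -o pattern pattern.c
 *   gcc -DCONFIG_X86_64 -o pattern pattern.c
 */
#include <stdio.h>

static void count_nmi(void)
{
#ifdef CONFIG_X86_32
        /* i386 path: the real code bumps nmi_count(cpu) */
        printf("i386 NMI accounting\n");
#else
        /* x86_64 path: the real code does add_pda(__nmi_count, 1) */
        printf("x86_64 NMI accounting\n");
#endif
}

int main(void)
{
        count_nmi();
        return 0;
}

Exactly one of the two config symbols is defined in a kernel build, so each
branch below compiles to the code that previously lived only in traps_32.c or
only in traps_64.c.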
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 076739863d24..ffb131f74f78 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -48,7 +48,6 @@
 
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
-#include <asm/kmemcheck.h>
 #include <asm/debugreg.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
@@ -59,6 +58,11 @@
 
 #include <mach_traps.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/pgalloc.h>
+#include <asm/proto.h>
+#include <asm/pda.h>
+#else
 #include <asm/processor-flags.h>
 #include <asm/arch_hooks.h>
 #include <asm/nmi.h>
@@ -83,6 +87,7 @@ char ignore_fpu_irq;
  */
 gate_desc idt_table[256]
         __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+#endif
 
 static int ignore_nmis;
 
@@ -106,6 +111,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
         dec_preempt_count();
 }
 
+#ifdef CONFIG_X86_32
 static inline void
 die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
@@ -153,6 +159,7 @@ static int lazy_iobitmap_copy(void)
 
         return 0;
 }
+#endif
 
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
@@ -160,6 +167,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 {
         struct task_struct *tsk = current;
 
+#ifdef CONFIG_X86_32
         if (regs->flags & X86_VM_MASK) {
                 /*
                  * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
@@ -169,11 +177,14 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
                         goto vm86_trap;
                 goto trap_signal;
         }
+#endif
 
         if (!user_mode(regs))
                 goto kernel_trap;
 
+#ifdef CONFIG_X86_32
 trap_signal:
+#endif
         /*
          * We want error_code and trap_no set for userspace faults and
          * kernelspace faults which result in die(), but not
@@ -186,6 +197,18 @@ trap_signal:
         tsk->thread.error_code = error_code;
         tsk->thread.trap_no = trapnr;
 
+#ifdef CONFIG_X86_64
+        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+            printk_ratelimit()) {
+                printk(KERN_INFO
+                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+                       tsk->comm, tsk->pid, str,
+                       regs->ip, regs->sp, error_code);
+                print_vma_addr(" in ", regs->ip);
+                printk("\n");
+        }
+#endif
+
         if (info)
                 force_sig_info(signr, info, tsk);
         else
@@ -200,11 +223,13 @@ kernel_trap:
         }
         return;
 
+#ifdef CONFIG_X86_32
 vm86_trap:
         if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                         error_code, trapnr))
                 goto trap_signal;
         return;
+#endif
 }
 
 #define DO_ERROR(trapnr, signr, str, name)                              \
@@ -239,9 +264,41 @@ DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
 DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+#ifdef CONFIG_X86_32
 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+#endif
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 
+#ifdef CONFIG_X86_64
+/* Runs on IST stack */
+dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+                        12, SIGBUS) == NOTIFY_STOP)
+                return;
+        preempt_conditional_sti(regs);
+        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+        preempt_conditional_cli(regs);
+}
+
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+{
+        static const char str[] = "double fault";
+        struct task_struct *tsk = current;
+
+        /* Return not checked because double check cannot be ignored */
+        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+
+        tsk->thread.error_code = error_code;
+        tsk->thread.trap_no = 8;
+
+        /* This is always a kernel trap and never fixable (and thus must
+           never return). */
+        for (;;)
+                die(str, regs, error_code);
+}
+#endif
+
 dotraplinkage void __kprobes
 do_general_protection(struct pt_regs *regs, long error_code)
 {
@@ -249,6 +306,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
         conditional_sti(regs);
 
+#ifdef CONFIG_X86_32
         if (lazy_iobitmap_copy()) {
                 /* restart the faulting instruction */
                 return;
@@ -256,6 +314,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
         if (regs->flags & X86_VM_MASK)
                 goto gp_in_vm86;
+#endif
 
         tsk = current;
         if (!user_mode(regs))
@@ -277,10 +336,12 @@ do_general_protection(struct pt_regs *regs, long error_code)
         force_sig(SIGSEGV, tsk);
         return;
 
+#ifdef CONFIG_X86_32
 gp_in_vm86:
         local_irq_enable();
         handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
         return;
+#endif
 
 gp_in_kernel:
         if (fixup_exception(regs))
@@ -368,6 +429,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
         printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 }
 
+#ifdef CONFIG_X86_32
 static DEFINE_SPINLOCK(nmi_print_lock);
 
 void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
@@ -402,6 +464,7 @@ void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 
         do_exit(SIGSEGV);
 }
+#endif
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
@@ -441,11 +504,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
                 mem_parity_error(reason, regs);
         if (reason & 0x40)
                 io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
         /*
          * Reassert NMI in case it became active meanwhile
          * as it's edge-triggered:
          */
         reassert_nmi();
+#endif
 }
 
 dotraplinkage notrace __kprobes void
@@ -453,7 +518,11 @@ do_nmi(struct pt_regs *regs, long error_code)
 {
         nmi_enter();
 
+#ifdef CONFIG_X86_32
         { int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
+#else
+        add_pda(__nmi_count, 1);
+#endif
 
         if (!ignore_nmis)
                 default_do_nmi(regs);
@@ -491,6 +560,29 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
         preempt_conditional_cli(regs);
 }
 
+#ifdef CONFIG_X86_64
+/* Help handler running on IST stack to switch back to user stack
+   for scheduling or signal handling. The actual stack switch is done in
+   entry.S */
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+{
+        struct pt_regs *regs = eregs;
+        /* Did already sync */
+        if (eregs == (struct pt_regs *)eregs->sp)
+                ;
+        /* Exception from user space */
+        else if (user_mode(eregs))
+                regs = task_pt_regs(current);
+        /* Exception from kernel and interrupts are enabled. Move to
+           kernel process stack. */
+        else if (eregs->flags & X86_EFLAGS_IF)
+                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
+        if (eregs != regs)
+                *regs = *eregs;
+        return regs;
+}
+#endif
+
 /*
  * Our handling of the processor debug registers is non-trivial.
  * We do not clear them on entry and exit from the kernel. Therefore
@@ -542,8 +634,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
                 goto clear_dr7;
         }
 
+#ifdef CONFIG_X86_32
         if (regs->flags & X86_VM_MASK)
                 goto debug_vm86;
+#endif
 
         /* Save debug status register where ptrace can see it */
         tsk->thread.debugreg6 = condition;
@@ -570,10 +664,12 @@ clear_dr7:
         preempt_conditional_cli(regs);
         return;
 
+#ifdef CONFIG_X86_32
debug_vm86:
         handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
         preempt_conditional_cli(regs);
         return;
+#endif
 
 clear_TF_reenable:
         set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
@@ -582,6 +678,20 @@ clear_TF_reenable:
         return;
 }
 
+#ifdef CONFIG_X86_64
+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
+{
+        if (fixup_exception(regs))
+                return 1;
+
+        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
+        /* Illegal floating point operation in the kernel */
+        current->thread.trap_no = trapnr;
+        die(str, regs, 0);
+        return 0;
+}
+#endif
+
 /*
  * Note that we play around with the 'TS' bit in an attempt to get
  * the correct behaviour even in the presence of the asynchronous
@@ -618,7 +728,9 @@ void math_error(void __user *ip)
         swd = get_fpu_swd(task);
         switch (swd & ~cwd & 0x3f) {
         case 0x000: /* No unmasked exception */
+#ifdef CONFIG_X86_32
                 return;
+#endif
         default: /* Multiple exceptions */
                 break;
         case 0x001: /* Invalid Op */
@@ -649,7 +761,15 @@ void math_error(void __user *ip)
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
         conditional_sti(regs);
+
+#ifdef CONFIG_X86_32
         ignore_fpu_irq = 1;
+#else
+        if (!user_mode(regs) &&
+            kernel_math_error(regs, "kernel x87 math error", 16))
+                return;
+#endif
+
         math_error((void __user *)regs->ip);
 }
 
@@ -706,6 +826,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
         conditional_sti(regs);
 
+#ifdef CONFIG_X86_32
         if (cpu_has_xmm) {
                 /* Handle SIMD FPU exceptions on PIII+ processors. */
                 ignore_fpu_irq = 1;
@@ -724,6 +845,12 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
         current->thread.error_code = error_code;
         die_if_kernel("cache flush denied", regs, error_code);
         force_sig(SIGSEGV, current);
+#else
+        if (!user_mode(regs) &&
+            kernel_math_error(regs, "kernel simd math error", 19))
+                return;
+        simd_math_error((void __user *)regs->ip);
+#endif
 }
 
 dotraplinkage void
@@ -736,6 +863,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 {
         struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
@@ -754,6 +882,15 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 
         return new_kesp;
 }
+#else
+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+{
+}
+
+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
+{
+}
+#endif
 
 /*
  * 'math_state_restore()' saves the current math information in the
@@ -786,14 +923,24 @@ asmlinkage void math_state_restore(void)
         }
 
         clts();                         /* Allow maths ops (or we recurse) */
+#ifdef CONFIG_X86_32
         restore_fpu(tsk);
+#else
+        /*
+         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+         */
+        if (unlikely(restore_fpu_checking(tsk))) {
+                stts();
+                force_sig(SIGSEGV, tsk);
+                return;
+        }
+#endif
         thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
         tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-
 asmlinkage void math_emulate(long arg)
 {
         printk(KERN_EMERG
@@ -802,12 +949,12 @@ asmlinkage void math_emulate(long arg)
         force_sig(SIGFPE, current);
         schedule();
 }
-
 #endif /* CONFIG_MATH_EMULATION */
 
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error)
 {
+#ifdef CONFIG_X86_32
         if (read_cr0() & X86_CR0_EM) {
                 conditional_sti(regs);
                 math_emulate(0);
@@ -815,8 +962,12 @@ do_device_not_available(struct pt_regs *regs, long error)
                 math_state_restore(); /* interrupts still off */
                 conditional_sti(regs);
         }
+#else
+        math_state_restore();
+#endif
 }
 
+#ifdef CONFIG_X86_32
 #ifdef CONFIG_X86_MCE
 dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
 {
@@ -839,10 +990,13 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
                 return;
         do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
 }
+#endif
 
 void __init trap_init(void)
 {
+#ifdef CONFIG_X86_32
         int i;
+#endif
 
 #ifdef CONFIG_EISA
         void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -862,7 +1016,11 @@ void __init trap_init(void)
         set_intr_gate(5, &bounds);
         set_intr_gate(6, &invalid_op);
         set_intr_gate(7, &device_not_available);
+#ifdef CONFIG_X86_32
         set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+#else
+        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+#endif
         set_intr_gate(9, &coprocessor_segment_overrun);
         set_intr_gate(10, &invalid_TSS);
         set_intr_gate(11, &segment_not_present);
@@ -877,6 +1035,11 @@ void __init trap_init(void)
 #endif
         set_intr_gate(19, &simd_coprocessor_error);
 
+#ifdef CONFIG_IA32_EMULATION
+        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+#endif
+
+#ifdef CONFIG_X86_32
         if (cpu_has_fxsr) {
                 printk(KERN_INFO "Enabling fast FPU save and restore... ");
                 set_in_cr4(X86_CR4_OSFXSR);
@@ -896,11 +1059,13 @@ void __init trap_init(void)
                 set_bit(i, used_vectors);
 
         set_bit(SYSCALL_VECTOR, used_vectors);
-
+#endif
         /*
          * Should be a barrier for any external CPU state:
          */
         cpu_init();
 
+#ifdef CONFIG_X86_32
         trap_init_hook();
+#endif
 }