author     Alexander van Heukelum <heukelum@fastmail.fm>  2008-10-03 16:00:40 -0400
committer  Ingo Molnar <mingo@elte.hu>                    2008-10-13 04:33:29 -0400
commit     8728861b4fead8119a1b7bb856a387320859cd98 (patch)
tree       03acdb0dccde800696409b6f59f8d5248c4da0f6 /arch/x86
parent     081f75bbdc86de53537e1b5aca01de72bd2fea6b (diff)

traps: x86: finalize unification of traps.c
traps_32.c and traps_64.c are now equal. Move one to traps.c,
delete the other one, and change the Makefile accordingly.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Diffstat (limited to 'arch/x86')

-rw-r--r--  arch/x86/kernel/Makefile                                          |    2 +-
-rw-r--r--  arch/x86/kernel/traps.c (renamed from arch/x86/kernel/traps_32.c) |    0
-rw-r--r--  arch/x86/kernel/traps_64.c                                        | 1070 ----------
 3 files changed, 1 insertion(+), 1071 deletions(-)
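A single traps.c can serve both builds because the width-specific code sits behind preprocessor guards; the deleted traps_64.c below is already written in exactly this shape. A minimal sketch of the pattern (illustrative only; example_report_trap is an invented name and does not appear in the commit):

#include <linux/kernel.h>
#include <linux/ptrace.h>

/* Width-specific behaviour folded into one body behind
 * CONFIG_X86_32, so the Makefile needs no per-width object. */
static void example_report_trap(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        /* Only the 32-bit build can be running in vm86 mode. */
        if (regs->flags & X86_VM_MASK)
                return;
#endif
        printk(KERN_INFO "trap at ip %lx\n", regs->ip);
}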
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index de63fed9fae8..0d41f0343dc0 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)
 
 obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y += traps_$(BITS).o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y += traps.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
 obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps.c
index ffb131f74f78..ffb131f74f78 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps.c
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
deleted file mode 100644
index 60ecc855ab81..000000000000
--- a/arch/x86/kernel/traps_64.c
+++ /dev/null
@@ -1,1070 +0,0 @@
-/*
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-/*
- * Handle hardware traps and faults.
- */
-#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
-#include <linux/spinlock.h>
-#include <linux/kprobes.h>
-#include <linux/uaccess.h>
-#include <linux/utsname.h>
-#include <linux/kdebug.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/unwind.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/kexec.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/nmi.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#ifdef CONFIG_EISA
-#include <linux/ioport.h>
-#include <linux/eisa.h>
-#endif
-
-#ifdef CONFIG_MCA
-#include <linux/mca.h>
-#endif
-
-#if defined(CONFIG_EDAC)
-#include <linux/edac.h>
-#endif
-
-#include <asm/stacktrace.h>
-#include <asm/processor.h>
-#include <asm/debugreg.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/unwind.h>
-#include <asm/traps.h>
-#include <asm/desc.h>
-#include <asm/i387.h>
-
-#include <mach_traps.h>
-
-#ifdef CONFIG_X86_64
-#include <asm/pgalloc.h>
-#include <asm/proto.h>
-#include <asm/pda.h>
-#else
-#include <asm/processor-flags.h>
-#include <asm/arch_hooks.h>
-#include <asm/nmi.h>
-#include <asm/smp.h>
-#include <asm/io.h>
-
-#include "cpu/mcheck/mce.h"
-
-DECLARE_BITMAP(used_vectors, NR_VECTORS);
-EXPORT_SYMBOL_GPL(used_vectors);
-
-asmlinkage int system_call(void);
-
-/* Do we ignore FPU interrupts? */
-char ignore_fpu_irq;
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround. We have a special link segment
- * for this.
- */
-gate_desc idt_table[256]
-        __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
-#endif
-
-static int ignore_nmis;
-
-static inline void conditional_sti(struct pt_regs *regs)
-{
-        if (regs->flags & X86_EFLAGS_IF)
-                local_irq_enable();
-}
-
-static inline void preempt_conditional_sti(struct pt_regs *regs)
-{
-        inc_preempt_count();
-        if (regs->flags & X86_EFLAGS_IF)
-                local_irq_enable();
-}
-
-static inline void preempt_conditional_cli(struct pt_regs *regs)
-{
-        if (regs->flags & X86_EFLAGS_IF)
-                local_irq_disable();
-        dec_preempt_count();
-}
-
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
-        if (!user_mode_vm(regs))
-                die(str, regs, err);
-}
-
-/*
- * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
- * invalid offset set (the LAZY one) and the faulting thread has
- * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
- * we set the offset field correctly and return 1.
- */
-static int lazy_iobitmap_copy(void)
-{
-        struct thread_struct *thread;
-        struct tss_struct *tss;
-        int cpu;
-
-        cpu = get_cpu();
-        tss = &per_cpu(init_tss, cpu);
-        thread = &current->thread;
-
-        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
-            thread->io_bitmap_ptr) {
-                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
-                       thread->io_bitmap_max);
-                /*
-                 * If the previously set map was extending to higher ports
-                 * than the current one, pad extra space with 0xff (no access).
-                 */
-                if (thread->io_bitmap_max < tss->io_bitmap_max) {
-                        memset((char *) tss->io_bitmap +
-                                thread->io_bitmap_max, 0xff,
-                                tss->io_bitmap_max - thread->io_bitmap_max);
-                }
-                tss->io_bitmap_max = thread->io_bitmap_max;
-                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-                tss->io_bitmap_owner = thread;
-                put_cpu();
-
-                return 1;
-        }
-        put_cpu();
-
-        return 0;
-}
-#endif
-
-static void __kprobes
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
-        long error_code, siginfo_t *info)
-{
-        struct task_struct *tsk = current;
-
-#ifdef CONFIG_X86_32
-        if (regs->flags & X86_VM_MASK) {
-                /*
-                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
-                 * On nmi (interrupt 2), do_trap should not be called.
-                 */
-                if (trapnr < 6)
-                        goto vm86_trap;
-                goto trap_signal;
-        }
-#endif
-
-        if (!user_mode(regs))
-                goto kernel_trap;
-
-#ifdef CONFIG_X86_32
-trap_signal:
-#endif
-        /*
-         * We want error_code and trap_no set for userspace faults and
-         * kernelspace faults which result in die(), but not
-         * kernelspace faults which are fixed up. die() gives the
-         * process no chance to handle the signal and notice the
-         * kernel fault information, so that won't result in polluting
-         * the information about previously queued, but not yet
-         * delivered, faults. See also do_general_protection below.
-         */
-        tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = trapnr;
-
-#ifdef CONFIG_X86_64
-        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
-            printk_ratelimit()) {
-                printk(KERN_INFO
-                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
-                       tsk->comm, tsk->pid, str,
-                       regs->ip, regs->sp, error_code);
-                print_vma_addr(" in ", regs->ip);
-                printk("\n");
-        }
-#endif
-
-        if (info)
-                force_sig_info(signr, info, tsk);
-        else
-                force_sig(signr, tsk);
-        return;
-
-kernel_trap:
-        if (!fixup_exception(regs)) {
-                tsk->thread.error_code = error_code;
-                tsk->thread.trap_no = trapnr;
-                die(str, regs, error_code);
-        }
-        return;
-
-#ifdef CONFIG_X86_32
-vm86_trap:
-        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
-                             error_code, trapnr))
-                goto trap_signal;
-        return;
-#endif
-}
-
-#define DO_ERROR(trapnr, signr, str, name)                              \
-dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
-{                                                                       \
-        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
-                                                        == NOTIFY_STOP) \
-                return;                                                 \
-        conditional_sti(regs);                                          \
-        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
-}
-
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
-dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
-{                                                                       \
-        siginfo_t info;                                                 \
-        info.si_signo = signr;                                          \
-        info.si_errno = 0;                                              \
-        info.si_code = sicode;                                          \
-        info.si_addr = (void __user *)siaddr;                           \
-        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
-                                                        == NOTIFY_STOP) \
-                return;                                                 \
-        conditional_sti(regs);                                          \
-        do_trap(trapnr, signr, str, regs, error_code, &info);           \
-}
-
-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
-DO_ERROR(4, SIGSEGV, "overflow", overflow)
-DO_ERROR(5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-#ifdef CONFIG_X86_32
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-#endif
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-
-#ifdef CONFIG_X86_64
-/* Runs on IST stack */
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-{
-        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-                        12, SIGBUS) == NOTIFY_STOP)
-                return;
-        preempt_conditional_sti(regs);
-        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
-        preempt_conditional_cli(regs);
-}
-
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
-{
-        static const char str[] = "double fault";
-        struct task_struct *tsk = current;
-
-        /* Return not checked because a double fault cannot be ignored */
-        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
-
-        tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 8;
-
-        /* This is always a kernel trap and never fixable (and thus must
-           never return). */
-        for (;;)
-                die(str, regs, error_code);
-}
-#endif
-
-dotraplinkage void __kprobes
-do_general_protection(struct pt_regs *regs, long error_code)
-{
-        struct task_struct *tsk;
-
-        conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
-        if (lazy_iobitmap_copy()) {
-                /* restart the faulting instruction */
-                return;
-        }
-
-        if (regs->flags & X86_VM_MASK)
-                goto gp_in_vm86;
-#endif
-
-        tsk = current;
-        if (!user_mode(regs))
-                goto gp_in_kernel;
-
-        tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 13;
-
-        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
-            printk_ratelimit()) {
-                printk(KERN_INFO
-                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
-                       tsk->comm, task_pid_nr(tsk),
-                       regs->ip, regs->sp, error_code);
-                print_vma_addr(" in ", regs->ip);
-                printk("\n");
-        }
-
-        force_sig(SIGSEGV, tsk);
-        return;
-
-#ifdef CONFIG_X86_32
-gp_in_vm86:
-        local_irq_enable();
-        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-        return;
-#endif
-
-gp_in_kernel:
-        if (fixup_exception(regs))
-                return;
-
-        tsk->thread.error_code = error_code;
-        tsk->thread.trap_no = 13;
-        if (notify_die(DIE_GPF, "general protection fault", regs,
-                error_code, 13, SIGSEGV) == NOTIFY_STOP)
-                return;
-        die("general protection fault", regs, error_code);
-}
-
-static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
-{
-        printk(KERN_EMERG
-               "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-               reason, smp_processor_id());
-
-        printk(KERN_EMERG
-               "You have some hardware problem, likely on the PCI bus.\n");
-
-#if defined(CONFIG_EDAC)
-        if (edac_handler_set()) {
-                edac_atomic_assert_error();
-                return;
-        }
-#endif
-
-        if (panic_on_unrecovered_nmi)
-                panic("NMI: Not continuing");
-
-        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-
-        /* Clear and disable the memory parity error line. */
-        reason = (reason & 0xf) | 4;
-        outb(reason, 0x61);
-}
-
-static notrace __kprobes void
-io_check_error(unsigned char reason, struct pt_regs *regs)
-{
-        unsigned long i;
-
-        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
-        show_registers(regs);
-
-        /* Re-enable the IOCK line, wait for a few seconds */
-        reason = (reason & 0xf) | 8;
-        outb(reason, 0x61);
-
-        i = 2000;
-        while (--i)
-                udelay(1000);
-
-        reason &= ~8;
-        outb(reason, 0x61);
-}
-
-static notrace __kprobes void
-unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
-{
-        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
-                        NOTIFY_STOP)
-                return;
-#ifdef CONFIG_MCA
-        /*
-         * Might actually be able to figure out what the guilty party
-         * is:
-         */
-        if (MCA_bus) {
-                mca_handle_nmi();
-                return;
-        }
-#endif
-        printk(KERN_EMERG
-               "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-               reason, smp_processor_id());
-
-        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
-        if (panic_on_unrecovered_nmi)
-                panic("NMI: Not continuing");
-
-        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-}
-
-#ifdef CONFIG_X86_32
-static DEFINE_SPINLOCK(nmi_print_lock);
-
-void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-                return;
-
-        spin_lock(&nmi_print_lock);
-        /*
-         * We are in trouble anyway, let's at least try
-         * to get a message out:
-         */
-        bust_spinlocks(1);
-        printk(KERN_EMERG "%s", str);
-        printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-        show_registers(regs);
-        if (do_panic)
-                panic("Non maskable interrupt");
-        console_silent();
-        spin_unlock(&nmi_print_lock);
-        bust_spinlocks(0);
-
-        /*
-         * If we are in kernel we are probably nested up pretty bad
-         * and might as well get out now while we still can:
-         */
-        if (!user_mode_vm(regs)) {
-                current->thread.trap_no = 2;
-                crash_kexec(regs);
-        }
-
-        do_exit(SIGSEGV);
-}
-#endif
-
-static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
-{
-        unsigned char reason = 0;
-        int cpu;
-
-        cpu = smp_processor_id();
-
-        /* Only the BSP gets external NMIs from the system. */
-        if (!cpu)
-                reason = get_nmi_reason();
-
-        if (!(reason & 0xc0)) {
-                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-                                == NOTIFY_STOP)
-                        return;
-#ifdef CONFIG_X86_LOCAL_APIC
-                /*
-                 * Ok, so this is none of the documented NMI sources,
-                 * so it must be the NMI watchdog.
-                 */
-                if (nmi_watchdog_tick(regs, reason))
-                        return;
-                if (!do_nmi_callback(regs, cpu))
-                        unknown_nmi_error(reason, regs);
-#else
-                unknown_nmi_error(reason, regs);
-#endif
-
-                return;
-        }
-        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-                return;
-
-        /* AK: following checks seem to be broken on modern chipsets. FIXME */
-        if (reason & 0x80)
-                mem_parity_error(reason, regs);
-        if (reason & 0x40)
-                io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-        /*
-         * Reassert NMI in case it became active meanwhile
-         * as it's edge-triggered:
-         */
-        reassert_nmi();
-#endif
-}
-
-dotraplinkage notrace __kprobes void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-        nmi_enter();
-
-#ifdef CONFIG_X86_32
-        { int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
-#else
-        add_pda(__nmi_count, 1);
-#endif
-
-        if (!ignore_nmis)
-                default_do_nmi(regs);
-
-        nmi_exit();
-}
-
-void stop_nmi(void)
-{
-        acpi_nmi_disable();
-        ignore_nmis++;
-}
-
-void restart_nmi(void)
-{
-        ignore_nmis--;
-        acpi_nmi_enable();
-}
-
-/* May run on IST stack. */
-dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
-{
-#ifdef CONFIG_KPROBES
-        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-                        == NOTIFY_STOP)
-                return;
-#else
-        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
-                        == NOTIFY_STOP)
-                return;
-#endif
-
-        preempt_conditional_sti(regs);
-        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-        preempt_conditional_cli(regs);
-}
-
-#ifdef CONFIG_X86_64
-/* Help handler running on IST stack to switch back to user stack
-   for scheduling or signal handling. The actual stack switch is done in
-   entry.S */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
-{
-        struct pt_regs *regs = eregs;
-        /* Did already sync */
-        if (eregs == (struct pt_regs *)eregs->sp)
-                ;
-        /* Exception from user space */
-        else if (user_mode(eregs))
-                regs = task_pt_regs(current);
-        /* Exception from kernel and interrupts are enabled. Move to
-           kernel process stack. */
-        else if (eregs->flags & X86_EFLAGS_IF)
-                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
-        if (eregs != regs)
-                *regs = *eregs;
-        return regs;
-}
-#endif
-
-/*
- * Our handling of the processor debug registers is non-trivial.
- * We do not clear them on entry and exit from the kernel. Therefore
- * it is possible to get a watchpoint trap here from inside the kernel.
- * However, the code in ./ptrace.c has ensured that the user can
- * only set watchpoints on userspace addresses. Therefore the in-kernel
- * watchpoint trap can only occur in code which is reading/writing
- * from user space. Such code must not hold kernel locks (since it
- * can equally take a page fault), therefore it is safe to call
- * force_sig_info even though that claims and releases locks.
- *
- * Code in ./signal.c ensures that the debug control register
- * is restored before we deliver any signal, and therefore that
- * user code runs with the correct debug control register even though
- * we clear it here.
- *
- * Being careful here means that we don't have to be as careful in a
- * lot of more complicated places (task switching can be a bit lazy
- * about restoring all the debug state, and ptrace doesn't have to
- * find every occurrence of the TF bit that could be saved away even
- * by user code)
- *
- * May run on IST stack.
- */
-dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
-{
-        struct task_struct *tsk = current;
-        unsigned long condition;
-        int si_code;
-
-        get_debugreg(condition, 6);
-
-        /*
-         * The processor cleared BTF, so don't mark that we need it set.
-         */
-        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-        tsk->thread.debugctlmsr = 0;
-
-        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-                       SIGTRAP) == NOTIFY_STOP)
-                return;
-
-        /* It's safe to allow irq's after DR6 has been saved */
-        preempt_conditional_sti(regs);
-
-        /* Mask out spurious debug traps due to lazy DR7 setting */
-        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-                if (!tsk->thread.debugreg7)
-                        goto clear_dr7;
-        }
-
-#ifdef CONFIG_X86_32
-        if (regs->flags & X86_VM_MASK)
-                goto debug_vm86;
-#endif
-
-        /* Save debug status register where ptrace can see it */
-        tsk->thread.debugreg6 = condition;
-
-        /*
-         * Single-stepping through TF: make sure we ignore any events in
-         * kernel space (but re-enable TF when returning to user mode).
-         */
-        if (condition & DR_STEP) {
-                if (!user_mode(regs))
-                        goto clear_TF_reenable;
-        }
-
-        si_code = get_si_code(condition);
-        /* Ok, finally something we can handle */
-        send_sigtrap(tsk, regs, error_code, si_code);
-
-        /*
-         * Disable additional traps. They'll be re-enabled when
-         * the signal is delivered.
-         */
-clear_dr7:
-        set_debugreg(0, 7);
-        preempt_conditional_cli(regs);
-        return;
-
-#ifdef CONFIG_X86_32
-debug_vm86:
-        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-        preempt_conditional_cli(regs);
-        return;
-#endif
-
-clear_TF_reenable:
-        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-        regs->flags &= ~X86_EFLAGS_TF;
-        preempt_conditional_cli(regs);
-        return;
-}
-
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
-        if (fixup_exception(regs))
-                return 1;
-
-        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-        /* Illegal floating point operation in the kernel */
-        current->thread.trap_no = trapnr;
-        die(str, regs, 0);
-        return 0;
-}
-#endif
-
-/*
- * Note that we play around with the 'TS' bit in an attempt to get
- * the correct behaviour even in the presence of the asynchronous
- * IRQ13 behaviour
- */
-void math_error(void __user *ip)
-{
-        struct task_struct *task;
-        siginfo_t info;
-        unsigned short cwd, swd;
-
-        /*
-         * Save the info for the exception handler and clear the error.
-         */
-        task = current;
-        save_init_fpu(task);
-        task->thread.trap_no = 16;
-        task->thread.error_code = 0;
-        info.si_signo = SIGFPE;
-        info.si_errno = 0;
-        info.si_code = __SI_FAULT;
-        info.si_addr = ip;
-        /*
-         * (~cwd & swd) will mask out exceptions that are not set to unmasked
-         * status. 0x3f is the exception bits in these regs, 0x200 is the
-         * C1 reg you need in case of a stack fault, 0x040 is the stack
-         * fault bit. We should only be taking one exception at a time,
-         * so if this combination doesn't produce any single exception,
-         * then we have a bad program that isn't synchronizing its FPU usage
-         * and it will suffer the consequences since we won't be able to
-         * fully reproduce the context of the exception
-         */
-        cwd = get_fpu_cwd(task);
-        swd = get_fpu_swd(task);
-        switch (swd & ~cwd & 0x3f) {
-        case 0x000: /* No unmasked exception */
-#ifdef CONFIG_X86_32
-                return;
-#endif
-        default: /* Multiple exceptions */
-                break;
-        case 0x001: /* Invalid Op */
-                /*
-                 * swd & 0x240 == 0x040: Stack Underflow
-                 * swd & 0x240 == 0x240: Stack Overflow
-                 * User must clear the SF bit (0x40) if set
-                 */
-                info.si_code = FPE_FLTINV;
-                break;
-        case 0x002: /* Denormalize */
-        case 0x010: /* Underflow */
-                info.si_code = FPE_FLTUND;
-                break;
-        case 0x004: /* Zero Divide */
-                info.si_code = FPE_FLTDIV;
-                break;
-        case 0x008: /* Overflow */
-                info.si_code = FPE_FLTOVF;
-                break;
-        case 0x020: /* Precision */
-                info.si_code = FPE_FLTRES;
-                break;
-        }
-        force_sig_info(SIGFPE, &info, task);
-}
-
-dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
-{
-        conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
-        ignore_fpu_irq = 1;
-#else
-        if (!user_mode(regs) &&
-            kernel_math_error(regs, "kernel x87 math error", 16))
-                return;
-#endif
-
-        math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
-        struct task_struct *task;
-        siginfo_t info;
-        unsigned short mxcsr;
-
-        /*
-         * Save the info for the exception handler and clear the error.
-         */
-        task = current;
-        save_init_fpu(task);
-        task->thread.trap_no = 19;
-        task->thread.error_code = 0;
-        info.si_signo = SIGFPE;
-        info.si_errno = 0;
-        info.si_code = __SI_FAULT;
-        info.si_addr = ip;
-        /*
-         * The SIMD FPU exceptions are handled a little differently, as there
-         * is only a single status/control register. Thus, to determine which
-         * unmasked exception was caught we must mask the exception mask bits
-         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-         */
-        mxcsr = get_fpu_mxcsr(task);
-        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-        case 0x000:
-        default:
-                break;
-        case 0x001: /* Invalid Op */
-                info.si_code = FPE_FLTINV;
-                break;
-        case 0x002: /* Denormalize */
-        case 0x010: /* Underflow */
-                info.si_code = FPE_FLTUND;
-                break;
-        case 0x004: /* Zero Divide */
-                info.si_code = FPE_FLTDIV;
-                break;
-        case 0x008: /* Overflow */
-                info.si_code = FPE_FLTOVF;
-                break;
-        case 0x020: /* Precision */
-                info.si_code = FPE_FLTRES;
-                break;
-        }
-        force_sig_info(SIGFPE, &info, task);
-}
-
-dotraplinkage void
-do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
-{
-        conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
-        if (cpu_has_xmm) {
-                /* Handle SIMD FPU exceptions on PIII+ processors. */
-                ignore_fpu_irq = 1;
-                simd_math_error((void __user *)regs->ip);
-                return;
-        }
-        /*
-         * Handle strange cache flush from user space exception
-         * in all other cases. This is undocumented behaviour.
-         */
-        if (regs->flags & X86_VM_MASK) {
-                handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
-                return;
-        }
-        current->thread.trap_no = 19;
-        current->thread.error_code = error_code;
-        die_if_kernel("cache flush denied", regs, error_code);
-        force_sig(SIGSEGV, current);
-#else
-        if (!user_mode(regs) &&
-            kernel_math_error(regs, "kernel simd math error", 19))
-                return;
-        simd_math_error((void __user *)regs->ip);
-#endif
-}
-
-dotraplinkage void
-do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
-{
-        conditional_sti(regs);
-#if 0
-        /* No need to warn about this any longer. */
-        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
-#endif
-}
-
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
-{
-        struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
-        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
-        unsigned long new_kesp = kesp - base;
-        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
-        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
-
-        /* Set up base for espfix segment */
-        desc &= 0x00f0ff0000000000ULL;
-        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
-                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
-                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
-                (lim_pages & 0xffff);
-        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
-
-        return new_kesp;
-}
-#else
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
-{
-}
-
-asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
-{
-}
-#endif
-
-/*
- * 'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
- */
-asmlinkage void math_state_restore(void)
-{
-        struct thread_info *thread = current_thread_info();
-        struct task_struct *tsk = thread->task;
-
-        if (!tsk_used_math(tsk)) {
-                local_irq_enable();
-                /*
-                 * does a slab alloc which can sleep
-                 */
-                if (init_fpu(tsk)) {
-                        /*
-                         * ran out of memory!
-                         */
-                        do_group_exit(SIGKILL);
-                        return;
-                }
-                local_irq_disable();
-        }
-
-        clts();                         /* Allow maths ops (or we recurse) */
-#ifdef CONFIG_X86_32
-        restore_fpu(tsk);
-#else
-        /*
-         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-         */
-        if (unlikely(restore_fpu_checking(tsk))) {
-                stts();
-                force_sig(SIGSEGV, tsk);
-                return;
-        }
-#endif
-        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
-        tsk->fpu_counter++;
-}
-EXPORT_SYMBOL_GPL(math_state_restore);
-
-#ifndef CONFIG_MATH_EMULATION
-asmlinkage void math_emulate(long arg)
-{
-        printk(KERN_EMERG
-               "math-emulation not enabled and no coprocessor found.\n");
-        printk(KERN_EMERG "killing %s.\n", current->comm);
-        force_sig(SIGFPE, current);
-        schedule();
-}
-#endif /* CONFIG_MATH_EMULATION */
-
-dotraplinkage void __kprobes
-do_device_not_available(struct pt_regs *regs, long error)
-{
-#ifdef CONFIG_X86_32
-        if (read_cr0() & X86_CR0_EM) {
-                conditional_sti(regs);
-                math_emulate(0);
-        } else {
-                math_state_restore(); /* interrupts still off */
-                conditional_sti(regs);
-        }
-#else
-        math_state_restore();
-#endif
-}
-
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_MCE
-dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
-{
-        conditional_sti(regs);
-        machine_check_vector(regs, error);
-}
-#endif
-
-dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
-{
-        siginfo_t info;
-        local_irq_enable();
-
-        info.si_signo = SIGILL;
-        info.si_errno = 0;
-        info.si_code = ILL_BADSTK;
-        info.si_addr = 0;
-        if (notify_die(DIE_TRAP, "iret exception",
-                       regs, error_code, 32, SIGILL) == NOTIFY_STOP)
-                return;
-        do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
-}
-#endif
-
-void __init trap_init(void)
-{
-#ifdef CONFIG_X86_32
-        int i;
-#endif
-
-#ifdef CONFIG_EISA
-        void __iomem *p = early_ioremap(0x0FFFD9, 4);
-
-        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
-                EISA_bus = 1;
-        early_iounmap(p, 4);
-#endif
-
-        set_intr_gate(0, &divide_error);
-        set_intr_gate_ist(1, &debug, DEBUG_STACK);
-        set_intr_gate_ist(2, &nmi, NMI_STACK);
-        /* int3 can be called from all */
-        set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
-        /* int4 can be called from all */
-        set_system_intr_gate(4, &overflow);
-        set_intr_gate(5, &bounds);
-        set_intr_gate(6, &invalid_op);
-        set_intr_gate(7, &device_not_available);
-#ifdef CONFIG_X86_32
-        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
-#else
-        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
-#endif
-        set_intr_gate(9, &coprocessor_segment_overrun);
-        set_intr_gate(10, &invalid_TSS);
-        set_intr_gate(11, &segment_not_present);
-        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
-        set_intr_gate(13, &general_protection);
-        set_intr_gate(14, &page_fault);
-        set_intr_gate(15, &spurious_interrupt_bug);
-        set_intr_gate(16, &coprocessor_error);
-        set_intr_gate(17, &alignment_check);
-#ifdef CONFIG_X86_MCE
-        set_intr_gate_ist(18, &machine_check, MCE_STACK);
-#endif
-        set_intr_gate(19, &simd_coprocessor_error);
-
-#ifdef CONFIG_IA32_EMULATION
-        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
-#endif
-
-#ifdef CONFIG_X86_32
-        if (cpu_has_fxsr) {
-                printk(KERN_INFO "Enabling fast FPU save and restore... ");
-                set_in_cr4(X86_CR4_OSFXSR);
-                printk("done.\n");
-        }
-        if (cpu_has_xmm) {
-                printk(KERN_INFO
-                       "Enabling unmasked SIMD FPU exception support... ");
-                set_in_cr4(X86_CR4_OSXMMEXCPT);
-                printk("done.\n");
-        }
-
-        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
-
-        /* Reserve all the builtin and the syscall vector: */
-        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
-                set_bit(i, used_vectors);
-
-        set_bit(SYSCALL_VECTOR, used_vectors);
-#endif
-        /*
-         * Should be a barrier for any external CPU state:
-         */
-        cpu_init();
-
-#ifdef CONFIG_X86_32
-        trap_init_hook();
-#endif
-}
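
For reference, each DO_ERROR() invocation in the deleted file expands, via token pasting of the name argument, into a complete trap handler. Hand-expanding one of the instantiations above gives the equivalent of what the preprocessor emits:

/* Hand expansion of DO_ERROR(4, SIGSEGV, "overflow", overflow)
 * from the deleted file -- illustrative, not new code. */
dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4, SIGSEGV)
                        == NOTIFY_STOP)
                return;
        conditional_sti(regs);
        do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
}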