Diffstat (limited to 'arch/x86/kernel/traps_64.c')
-rw-r--r-- | arch/x86/kernel/traps_64.c | 1212
1 file changed, 0 insertions(+), 1212 deletions(-)
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
deleted file mode 100644
index 513caaca7115..000000000000
--- a/arch/x86/kernel/traps_64.c
+++ /dev/null
@@ -1,1212 +0,0 @@
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pda.h>
#include <asm/traps.h>

#include <mach_traps.h>

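/*
 * Oops/backtrace tunables: kstack_depth_to_print and code_bytes can be
 * changed at boot time via the "kstack=" and "code_bytes=" parameters
 * parsed at the bottom of this file.
 */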
int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 12;
static unsigned int code_bytes = 64;
static int ignore_nmis;
static int die_counter;

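/*
 * Re-enable interrupts only if the interrupted context had them enabled
 * (IF set in the saved flags); the preempt_ variants additionally bump
 * the preempt count so the handler cannot schedule while it might be
 * running on an exception stack.
 */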
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	dec_preempt_count();
}

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%016lx>] %s%pS\n", address, reliable ? "" : "? ",
	       (void *)address);
}

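/*
 * If 'stack' lies within one of this CPU's IST exception stacks, hand
 * back that stack's top and its name via *idp; *usedp tracks stacks we
 * have already visited so a corrupted trace cannot loop forever.
 */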
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

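/*
 * Check that 'p' points at an object of 'size' bytes lying inside the
 * stack: below 'end' when an explicit bound is given (IRQ/exception
 * stacks), otherwise within THREAD_SIZE of the thread_info.
 */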
static inline int valid_stack_ptr(struct thread_info *tinfo,
				  void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		    unsigned long *stack, unsigned long bp,
		    const struct stacktrace_ops *ops, void *data,
		    unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
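			/*
			 * On x86-64 a return address sits 8 bytes above
			 * the saved frame pointer, so a hit at bp + 8 is
			 * a reliable entry from the frame-pointer chain;
			 * any other text address found on the stack is
			 * only a guess.
			 */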
			if ((unsigned long) stack == bp + 8) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, unwind them in turn:
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = print_context_stack(tinfo, stack, bp, ops,
						 data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
							 ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would
				 * be the process stack normally) via the
				 * last pointer (index -1 to end) in the
				 * IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("\nCall Trace:\n");
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
	printk("\n");
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	/*
	 * debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movq %%rbp, %0" : "=r" (bp) :);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk("Stack: ");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   regs->bp, "");
		printk("\n");

		printk(KERN_EMERG "Code: ");

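		/*
		 * Dump code_bytes bytes around the faulting instruction,
		 * starting about two thirds (43/64) of the window before
		 * RIP so the bytes leading up to the fault are visible.
		 */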
		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at RIP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}

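/*
 * BUG() is implemented with a ud2 instruction, whose two opcode bytes
 * (0f 0b) read as a little-endian short give 0x0b0f.
 */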
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}

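/*
 * Serialize oops output across CPUs. die_owner records who holds
 * die_lock so that the same CPU can oops again (a nested oops) without
 * deadlocking on its own lock.
 */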
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (!regs) {
		oops_exit();
		return;
	}
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}

notrace __kprobes void
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	flags = oops_begin();
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs))
		goto kernel_trap;

	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}

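/*
 * Templates for the straightforward exceptions: notify the die chain,
 * conditionally re-enable interrupts and hand off to do_trap(). The
 * _INFO variant additionally fills in a siginfo for the forced signal.
 */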
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
		       12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}

asmlinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
		       error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

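	/*
	 * Bits 7 and 6 of the status byte read from port 0x61 flag
	 * memory parity and I/O check errors; if neither is set, the
	 * NMI came from somewhere else (NMI watchdog, an IPI, ...).
	 */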
	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
		    == NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}

asmlinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	add_pda(__nmi_count, 1);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

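/*
 * Temporarily suppress NMI handling: acpi_nmi_disable() masks the
 * watchdog source, and a non-zero ignore_nmis makes do_nmi() discard
 * anything that still gets through.
 */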
void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
	    == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did we already sync? */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
				   unsigned long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	siginfo_t info;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 flag you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences, since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	if (!used_math()) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(me)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();			/* Allow maths ops (or we recurse) */
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
		stts();
		force_sig(SIGSEGV, me);
		return;
	}
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

void __init trap_init(void)
{
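	/*
	 * Exceptions that may arrive on a broken stack (debug, NMI,
	 * double fault, stack segment, machine check) are given
	 * dedicated IST stacks via set_intr_gate_ist().
	 */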
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow);		/* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * initialize the per thread extended state:
	 */
	init_thread_xstate();
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();
}

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);