Diffstat (limited to 'arch/x86/kernel/traps_64.c')
-rw-r--r-- | arch/x86/kernel/traps_64.c | 571
1 file changed, 279 insertions, 292 deletions
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index adff76ea97c4..3f18d73f420c 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -10,73 +10,56 @@
10 | * 'Traps.c' handles hardware traps and faults after we have saved some | 10 | * 'Traps.c' handles hardware traps and faults after we have saved some |
11 | * state in 'entry.S'. | 11 | * state in 'entry.S'. |
12 | */ | 12 | */ |
13 | #include <linux/sched.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <linux/utsname.h> | ||
20 | #include <linux/kdebug.h> | ||
14 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | ||
23 | #include <linux/ptrace.h> | ||
15 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/unwind.h> | ||
26 | #include <linux/delay.h> | ||
16 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
17 | #include <linux/ptrace.h> | 28 | #include <linux/kexec.h> |
29 | #include <linux/sched.h> | ||
18 | #include <linux/timer.h> | 30 | #include <linux/timer.h> |
19 | #include <linux/mm.h> | ||
20 | #include <linux/init.h> | 31 | #include <linux/init.h> |
21 | #include <linux/delay.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/kallsyms.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include <linux/nmi.h> | ||
28 | #include <linux/kprobes.h> | ||
29 | #include <linux/kexec.h> | ||
30 | #include <linux/unwind.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <linux/bug.h> | 32 | #include <linux/bug.h> |
33 | #include <linux/kdebug.h> | 33 | #include <linux/nmi.h> |
34 | #include <linux/utsname.h> | 34 | #include <linux/mm.h> |
35 | |||
36 | #include <mach_traps.h> | ||
37 | 35 | ||
38 | #if defined(CONFIG_EDAC) | 36 | #if defined(CONFIG_EDAC) |
39 | #include <linux/edac.h> | 37 | #include <linux/edac.h> |
40 | #endif | 38 | #endif |
41 | 39 | ||
42 | #include <asm/system.h> | 40 | #include <asm/stacktrace.h> |
43 | #include <asm/io.h> | 41 | #include <asm/processor.h> |
44 | #include <asm/atomic.h> | ||
45 | #include <asm/debugreg.h> | 42 | #include <asm/debugreg.h> |
43 | #include <asm/atomic.h> | ||
44 | #include <asm/system.h> | ||
45 | #include <asm/unwind.h> | ||
46 | #include <asm/desc.h> | 46 | #include <asm/desc.h> |
47 | #include <asm/i387.h> | 47 | #include <asm/i387.h> |
48 | #include <asm/processor.h> | 48 | #include <asm/nmi.h> |
49 | #include <asm/unwind.h> | ||
50 | #include <asm/smp.h> | 49 | #include <asm/smp.h> |
50 | #include <asm/io.h> | ||
51 | #include <asm/pgalloc.h> | 51 | #include <asm/pgalloc.h> |
52 | #include <asm/pda.h> | ||
53 | #include <asm/proto.h> | 52 | #include <asm/proto.h> |
54 | #include <asm/nmi.h> | 53 | #include <asm/pda.h> |
55 | #include <asm/stacktrace.h> | 54 | #include <asm/traps.h> |
56 | 55 | ||
57 | asmlinkage void divide_error(void); | 56 | #include <mach_traps.h> |
58 | asmlinkage void debug(void); | ||
59 | asmlinkage void nmi(void); | ||
60 | asmlinkage void int3(void); | ||
61 | asmlinkage void overflow(void); | ||
62 | asmlinkage void bounds(void); | ||
63 | asmlinkage void invalid_op(void); | ||
64 | asmlinkage void device_not_available(void); | ||
65 | asmlinkage void double_fault(void); | ||
66 | asmlinkage void coprocessor_segment_overrun(void); | ||
67 | asmlinkage void invalid_TSS(void); | ||
68 | asmlinkage void segment_not_present(void); | ||
69 | asmlinkage void stack_segment(void); | ||
70 | asmlinkage void general_protection(void); | ||
71 | asmlinkage void page_fault(void); | ||
72 | asmlinkage void coprocessor_error(void); | ||
73 | asmlinkage void simd_coprocessor_error(void); | ||
74 | asmlinkage void reserved(void); | ||
75 | asmlinkage void alignment_check(void); | ||
76 | asmlinkage void machine_check(void); | ||
77 | asmlinkage void spurious_interrupt_bug(void); | ||
78 | 57 | ||
58 | int panic_on_unrecovered_nmi; | ||
59 | int kstack_depth_to_print = 12; | ||
79 | static unsigned int code_bytes = 64; | 60 | static unsigned int code_bytes = 64; |
61 | static int ignore_nmis; | ||
62 | static int die_counter; | ||
80 | 63 | ||
81 | static inline void conditional_sti(struct pt_regs *regs) | 64 | static inline void conditional_sti(struct pt_regs *regs) |
82 | { | 65 | { |
@@ -100,34 +83,9 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
100 | dec_preempt_count(); | 83 | dec_preempt_count(); |
101 | } | 84 | } |
102 | 85 | ||
103 | int kstack_depth_to_print = 12; | ||
104 | |||
105 | void printk_address(unsigned long address, int reliable) | 86 | void printk_address(unsigned long address, int reliable) |
106 | { | 87 | { |
107 | #ifdef CONFIG_KALLSYMS | 88 | printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); |
108 | unsigned long offset = 0, symsize; | ||
109 | const char *symname; | ||
110 | char *modname; | ||
111 | char *delim = ":"; | ||
112 | char namebuf[KSYM_NAME_LEN]; | ||
113 | char reliab[4] = ""; | ||
114 | |||
115 | symname = kallsyms_lookup(address, &symsize, &offset, | ||
116 | &modname, namebuf); | ||
117 | if (!symname) { | ||
118 | printk(" [<%016lx>]\n", address); | ||
119 | return; | ||
120 | } | ||
121 | if (!reliable) | ||
122 | strcpy(reliab, "? "); | ||
123 | |||
124 | if (!modname) | ||
125 | modname = delim = ""; | ||
126 | printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n", | ||
127 | address, reliab, delim, modname, delim, symname, offset, symsize); | ||
128 | #else | ||
129 | printk(" [<%016lx>]\n", address); | ||
130 | #endif | ||
131 | } | 89 | } |
132 | 90 | ||
133 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
@@ -204,8 +162,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
204 | return NULL; | 162 | return NULL; |
205 | } | 163 | } |
206 | 164 | ||
207 | #define MSG(txt) ops->warning(data, txt) | ||
208 | |||
209 | /* | 165 | /* |
210 | * x86-64 can have up to three kernel stacks: | 166 | * x86-64 can have up to three kernel stacks: |
211 | * process stack | 167 | * process stack |
@@ -232,11 +188,11 @@ struct stack_frame {
232 | unsigned long return_address; | 188 | unsigned long return_address; |
233 | }; | 189 | }; |
234 | 190 | ||
235 | 191 | static inline unsigned long | |
236 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 192 | print_context_stack(struct thread_info *tinfo, |
237 | unsigned long *stack, unsigned long bp, | 193 | unsigned long *stack, unsigned long bp, |
238 | const struct stacktrace_ops *ops, void *data, | 194 | const struct stacktrace_ops *ops, void *data, |
239 | unsigned long *end) | 195 | unsigned long *end) |
240 | { | 196 | { |
241 | struct stack_frame *frame = (struct stack_frame *)bp; | 197 | struct stack_frame *frame = (struct stack_frame *)bp; |
242 | 198 | ||
@@ -258,7 +214,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
258 | return bp; | 214 | return bp; |
259 | } | 215 | } |
260 | 216 | ||
261 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | 217 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
262 | unsigned long *stack, unsigned long bp, | 218 | unsigned long *stack, unsigned long bp, |
263 | const struct stacktrace_ops *ops, void *data) | 219 | const struct stacktrace_ops *ops, void *data) |
264 | { | 220 | { |
@@ -267,36 +223,34 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
267 | unsigned used = 0; | 223 | unsigned used = 0; |
268 | struct thread_info *tinfo; | 224 | struct thread_info *tinfo; |
269 | 225 | ||
270 | if (!tsk) | 226 | if (!task) |
271 | tsk = current; | 227 | task = current; |
272 | tinfo = task_thread_info(tsk); | ||
273 | 228 | ||
274 | if (!stack) { | 229 | if (!stack) { |
275 | unsigned long dummy; | 230 | unsigned long dummy; |
276 | stack = &dummy; | 231 | stack = &dummy; |
277 | if (tsk && tsk != current) | 232 | if (task && task != current) |
278 | stack = (unsigned long *)tsk->thread.sp; | 233 | stack = (unsigned long *)task->thread.sp; |
279 | } | 234 | } |
280 | 235 | ||
281 | #ifdef CONFIG_FRAME_POINTER | 236 | #ifdef CONFIG_FRAME_POINTER |
282 | if (!bp) { | 237 | if (!bp) { |
283 | if (tsk == current) { | 238 | if (task == current) { |
284 | /* Grab bp right from our regs */ | 239 | /* Grab bp right from our regs */ |
285 | asm("movq %%rbp, %0" : "=r" (bp):); | 240 | asm("movq %%rbp, %0" : "=r" (bp) :); |
286 | } else { | 241 | } else { |
287 | /* bp is the last reg pushed by switch_to */ | 242 | /* bp is the last reg pushed by switch_to */ |
288 | bp = *(unsigned long *) tsk->thread.sp; | 243 | bp = *(unsigned long *) task->thread.sp; |
289 | } | 244 | } |
290 | } | 245 | } |
291 | #endif | 246 | #endif |
292 | 247 | ||
293 | |||
294 | |||
295 | /* | 248 | /* |
296 | * Print function call entries in all stacks, starting at the | 249 | * Print function call entries in all stacks, starting at the |
297 | * current stack address. If the stacks consist of nested | 250 | * current stack address. If the stacks consist of nested |
298 | * exceptions | 251 | * exceptions |
299 | */ | 252 | */ |
253 | tinfo = task_thread_info(task); | ||
300 | for (;;) { | 254 | for (;;) { |
301 | char *id; | 255 | char *id; |
302 | unsigned long *estack_end; | 256 | unsigned long *estack_end; |
@@ -381,18 +335,24 @@ static const struct stacktrace_ops print_trace_ops = {
381 | .address = print_trace_address, | 335 | .address = print_trace_address, |
382 | }; | 336 | }; |
383 | 337 | ||
384 | void | 338 | static void |
385 | show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, | 339 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
386 | unsigned long bp) | 340 | unsigned long *stack, unsigned long bp, char *log_lvl) |
387 | { | 341 | { |
388 | printk("\nCall Trace:\n"); | 342 | printk("\nCall Trace:\n"); |
389 | dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL); | 343 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
390 | printk("\n"); | 344 | printk("\n"); |
391 | } | 345 | } |
392 | 346 | ||
347 | void show_trace(struct task_struct *task, struct pt_regs *regs, | ||
348 | unsigned long *stack, unsigned long bp) | ||
349 | { | ||
350 | show_trace_log_lvl(task, regs, stack, bp, ""); | ||
351 | } | ||
352 | |||
393 | static void | 353 | static void |
394 | _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | 354 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
395 | unsigned long bp) | 355 | unsigned long *sp, unsigned long bp, char *log_lvl) |
396 | { | 356 | { |
397 | unsigned long *stack; | 357 | unsigned long *stack; |
398 | int i; | 358 | int i; |
@@ -404,14 +364,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
404 | // back trace for this cpu. | 364 | // back trace for this cpu. |
405 | 365 | ||
406 | if (sp == NULL) { | 366 | if (sp == NULL) { |
407 | if (tsk) | 367 | if (task) |
408 | sp = (unsigned long *)tsk->thread.sp; | 368 | sp = (unsigned long *)task->thread.sp; |
409 | else | 369 | else |
410 | sp = (unsigned long *)&sp; | 370 | sp = (unsigned long *)&sp; |
411 | } | 371 | } |
412 | 372 | ||
413 | stack = sp; | 373 | stack = sp; |
414 | for(i=0; i < kstack_depth_to_print; i++) { | 374 | for (i = 0; i < kstack_depth_to_print; i++) { |
415 | if (stack >= irqstack && stack <= irqstack_end) { | 375 | if (stack >= irqstack && stack <= irqstack_end) { |
416 | if (stack == irqstack_end) { | 376 | if (stack == irqstack_end) { |
417 | stack = (unsigned long *) (irqstack_end[-1]); | 377 | stack = (unsigned long *) (irqstack_end[-1]); |
@@ -426,12 +386,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
426 | printk(" %016lx", *stack++); | 386 | printk(" %016lx", *stack++); |
427 | touch_nmi_watchdog(); | 387 | touch_nmi_watchdog(); |
428 | } | 388 | } |
429 | show_trace(tsk, regs, sp, bp); | 389 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
430 | } | 390 | } |
431 | 391 | ||
432 | void show_stack(struct task_struct *tsk, unsigned long * sp) | 392 | void show_stack(struct task_struct *task, unsigned long *sp) |
433 | { | 393 | { |
434 | _show_stack(tsk, NULL, sp, 0); | 394 | show_stack_log_lvl(task, NULL, sp, 0, ""); |
435 | } | 395 | } |
436 | 396 | ||
437 | /* | 397 | /* |
@@ -439,8 +399,8 @@ void show_stack(struct task_struct *tsk, unsigned long * sp)
439 | */ | 399 | */ |
440 | void dump_stack(void) | 400 | void dump_stack(void) |
441 | { | 401 | { |
442 | unsigned long dummy; | ||
443 | unsigned long bp = 0; | 402 | unsigned long bp = 0; |
403 | unsigned long stack; | ||
444 | 404 | ||
445 | #ifdef CONFIG_FRAME_POINTER | 405 | #ifdef CONFIG_FRAME_POINTER |
446 | if (!bp) | 406 | if (!bp) |
@@ -452,7 +412,7 @@ void dump_stack(void)
452 | init_utsname()->release, | 412 | init_utsname()->release, |
453 | (int)strcspn(init_utsname()->version, " "), | 413 | (int)strcspn(init_utsname()->version, " "), |
454 | init_utsname()->version); | 414 | init_utsname()->version); |
455 | show_trace(NULL, NULL, &dummy, bp); | 415 | show_trace(NULL, NULL, &stack, bp); |
456 | } | 416 | } |
457 | 417 | ||
458 | EXPORT_SYMBOL(dump_stack); | 418 | EXPORT_SYMBOL(dump_stack); |
@@ -463,12 +423,8 @@ void show_registers(struct pt_regs *regs)
463 | unsigned long sp; | 423 | unsigned long sp; |
464 | const int cpu = smp_processor_id(); | 424 | const int cpu = smp_processor_id(); |
465 | struct task_struct *cur = cpu_pda(cpu)->pcurrent; | 425 | struct task_struct *cur = cpu_pda(cpu)->pcurrent; |
466 | u8 *ip; | ||
467 | unsigned int code_prologue = code_bytes * 43 / 64; | ||
468 | unsigned int code_len = code_bytes; | ||
469 | 426 | ||
470 | sp = regs->sp; | 427 | sp = regs->sp; |
471 | ip = (u8 *) regs->ip - code_prologue; | ||
472 | printk("CPU %d ", cpu); | 428 | printk("CPU %d ", cpu); |
473 | __show_regs(regs); | 429 | __show_regs(regs); |
474 | printk("Process %s (pid: %d, threadinfo %p, task %p)\n", | 430 | printk("Process %s (pid: %d, threadinfo %p, task %p)\n", |
@@ -479,15 +435,22 @@ void show_registers(struct pt_regs *regs)
479 | * time of the fault.. | 435 | * time of the fault.. |
480 | */ | 436 | */ |
481 | if (!user_mode(regs)) { | 437 | if (!user_mode(regs)) { |
438 | unsigned int code_prologue = code_bytes * 43 / 64; | ||
439 | unsigned int code_len = code_bytes; | ||
482 | unsigned char c; | 440 | unsigned char c; |
441 | u8 *ip; | ||
442 | |||
483 | printk("Stack: "); | 443 | printk("Stack: "); |
484 | _show_stack(NULL, regs, (unsigned long *)sp, regs->bp); | 444 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, |
445 | regs->bp, ""); | ||
485 | printk("\n"); | 446 | printk("\n"); |
486 | 447 | ||
487 | printk(KERN_EMERG "Code: "); | 448 | printk(KERN_EMERG "Code: "); |
449 | |||
450 | ip = (u8 *)regs->ip - code_prologue; | ||
488 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { | 451 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { |
489 | /* try starting at RIP */ | 452 | /* try starting at RIP */ |
490 | ip = (u8 *) regs->ip; | 453 | ip = (u8 *)regs->ip; |
491 | code_len = code_len - code_prologue + 1; | 454 | code_len = code_len - code_prologue + 1; |
492 | } | 455 | } |
493 | for (i = 0; i < code_len; i++, ip++) { | 456 | for (i = 0; i < code_len; i++, ip++) { |
@@ -503,7 +466,7 @@ void show_registers(struct pt_regs *regs)
503 | } | 466 | } |
504 | } | 467 | } |
505 | printk("\n"); | 468 | printk("\n"); |
506 | } | 469 | } |
507 | 470 | ||
508 | int is_valid_bugaddr(unsigned long ip) | 471 | int is_valid_bugaddr(unsigned long ip) |
509 | { | 472 | { |
@@ -543,7 +506,7 @@ unsigned __kprobes long oops_begin(void)
543 | } | 506 | } |
544 | 507 | ||
545 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) | 508 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) |
546 | { | 509 | { |
547 | die_owner = -1; | 510 | die_owner = -1; |
548 | bust_spinlocks(0); | 511 | bust_spinlocks(0); |
549 | die_nest_count--; | 512 | die_nest_count--; |
@@ -561,10 +524,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
561 | do_exit(signr); | 524 | do_exit(signr); |
562 | } | 525 | } |
563 | 526 | ||
564 | int __kprobes __die(const char * str, struct pt_regs * regs, long err) | 527 | int __kprobes __die(const char *str, struct pt_regs *regs, long err) |
565 | { | 528 | { |
566 | static int die_counter; | 529 | printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter); |
567 | printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter); | ||
568 | #ifdef CONFIG_PREEMPT | 530 | #ifdef CONFIG_PREEMPT |
569 | printk("PREEMPT "); | 531 | printk("PREEMPT "); |
570 | #endif | 532 | #endif |
@@ -575,8 +537,10 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
575 | printk("DEBUG_PAGEALLOC"); | 537 | printk("DEBUG_PAGEALLOC"); |
576 | #endif | 538 | #endif |
577 | printk("\n"); | 539 | printk("\n"); |
578 | if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 540 | if (notify_die(DIE_OOPS, str, regs, err, |
541 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | ||
579 | return 1; | 542 | return 1; |
543 | |||
580 | show_registers(regs); | 544 | show_registers(regs); |
581 | add_taint(TAINT_DIE); | 545 | add_taint(TAINT_DIE); |
582 | /* Executive summary in case the oops scrolled away */ | 546 | /* Executive summary in case the oops scrolled away */ |
@@ -588,7 +552,7 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
588 | return 0; | 552 | return 0; |
589 | } | 553 | } |
590 | 554 | ||
591 | void die(const char * str, struct pt_regs * regs, long err) | 555 | void die(const char *str, struct pt_regs *regs, long err) |
592 | { | 556 | { |
593 | unsigned long flags = oops_begin(); | 557 | unsigned long flags = oops_begin(); |
594 | 558 | ||
@@ -605,8 +569,7 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
605 | { | 569 | { |
606 | unsigned long flags; | 570 | unsigned long flags; |
607 | 571 | ||
608 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == | 572 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) |
609 | NOTIFY_STOP) | ||
610 | return; | 573 | return; |
611 | 574 | ||
612 | flags = oops_begin(); | 575 | flags = oops_begin(); |
@@ -614,7 +577,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
614 | * We are in trouble anyway, lets at least try | 577 | * We are in trouble anyway, lets at least try |
615 | * to get a message out. | 578 | * to get a message out. |
616 | */ | 579 | */ |
617 | printk(str, smp_processor_id()); | 580 | printk(KERN_EMERG "%s", str); |
581 | printk(" on CPU%d, ip %08lx, registers:\n", | ||
582 | smp_processor_id(), regs->ip); | ||
618 | show_registers(regs); | 583 | show_registers(regs); |
619 | if (kexec_should_crash(current)) | 584 | if (kexec_should_crash(current)) |
620 | crash_kexec(regs); | 585 | crash_kexec(regs); |
@@ -626,44 +591,44 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
626 | do_exit(SIGBUS); | 591 | do_exit(SIGBUS); |
627 | } | 592 | } |
628 | 593 | ||
629 | static void __kprobes do_trap(int trapnr, int signr, char *str, | 594 | static void __kprobes |
630 | struct pt_regs * regs, long error_code, | 595 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
631 | siginfo_t *info) | 596 | long error_code, siginfo_t *info) |
632 | { | 597 | { |
633 | struct task_struct *tsk = current; | 598 | struct task_struct *tsk = current; |
634 | 599 | ||
635 | if (user_mode(regs)) { | 600 | if (!user_mode(regs)) |
636 | /* | 601 | goto kernel_trap; |
637 | * We want error_code and trap_no set for userspace | ||
638 | * faults and kernelspace faults which result in | ||
639 | * die(), but not kernelspace faults which are fixed | ||
640 | * up. die() gives the process no chance to handle | ||
641 | * the signal and notice the kernel fault information, | ||
642 | * so that won't result in polluting the information | ||
643 | * about previously queued, but not yet delivered, | ||
644 | * faults. See also do_general_protection below. | ||
645 | */ | ||
646 | tsk->thread.error_code = error_code; | ||
647 | tsk->thread.trap_no = trapnr; | ||
648 | |||
649 | if (show_unhandled_signals && unhandled_signal(tsk, signr) && | ||
650 | printk_ratelimit()) { | ||
651 | printk(KERN_INFO | ||
652 | "%s[%d] trap %s ip:%lx sp:%lx error:%lx", | ||
653 | tsk->comm, tsk->pid, str, | ||
654 | regs->ip, regs->sp, error_code); | ||
655 | print_vma_addr(" in ", regs->ip); | ||
656 | printk("\n"); | ||
657 | } | ||
658 | 602 | ||
659 | if (info) | 603 | /* |
660 | force_sig_info(signr, info, tsk); | 604 | * We want error_code and trap_no set for userspace faults and |
661 | else | 605 | * kernelspace faults which result in die(), but not |
662 | force_sig(signr, tsk); | 606 | * kernelspace faults which are fixed up. die() gives the |
663 | return; | 607 | * process no chance to handle the signal and notice the |
608 | * kernel fault information, so that won't result in polluting | ||
609 | * the information about previously queued, but not yet | ||
610 | * delivered, faults. See also do_general_protection below. | ||
611 | */ | ||
612 | tsk->thread.error_code = error_code; | ||
613 | tsk->thread.trap_no = trapnr; | ||
614 | |||
615 | if (show_unhandled_signals && unhandled_signal(tsk, signr) && | ||
616 | printk_ratelimit()) { | ||
617 | printk(KERN_INFO | ||
618 | "%s[%d] trap %s ip:%lx sp:%lx error:%lx", | ||
619 | tsk->comm, tsk->pid, str, | ||
620 | regs->ip, regs->sp, error_code); | ||
621 | print_vma_addr(" in ", regs->ip); | ||
622 | printk("\n"); | ||
664 | } | 623 | } |
665 | 624 | ||
625 | if (info) | ||
626 | force_sig_info(signr, info, tsk); | ||
627 | else | ||
628 | force_sig(signr, tsk); | ||
629 | return; | ||
666 | 630 | ||
631 | kernel_trap: | ||
667 | if (!fixup_exception(regs)) { | 632 | if (!fixup_exception(regs)) { |
668 | tsk->thread.error_code = error_code; | 633 | tsk->thread.error_code = error_code; |
669 | tsk->thread.trap_no = trapnr; | 634 | tsk->thread.trap_no = trapnr; |
@@ -673,41 +638,39 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
673 | } | 638 | } |
674 | 639 | ||
675 | #define DO_ERROR(trapnr, signr, str, name) \ | 640 | #define DO_ERROR(trapnr, signr, str, name) \ |
676 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 641 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
677 | { \ | 642 | { \ |
678 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 643 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
679 | == NOTIFY_STOP) \ | 644 | == NOTIFY_STOP) \ |
680 | return; \ | 645 | return; \ |
681 | conditional_sti(regs); \ | 646 | conditional_sti(regs); \ |
682 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ | 647 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ |
683 | } | 648 | } |
684 | 649 | ||
685 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 650 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
686 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 651 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
687 | { \ | 652 | { \ |
688 | siginfo_t info; \ | 653 | siginfo_t info; \ |
689 | info.si_signo = signr; \ | 654 | info.si_signo = signr; \ |
690 | info.si_errno = 0; \ | 655 | info.si_errno = 0; \ |
691 | info.si_code = sicode; \ | 656 | info.si_code = sicode; \ |
692 | info.si_addr = (void __user *)siaddr; \ | 657 | info.si_addr = (void __user *)siaddr; \ |
693 | trace_hardirqs_fixup(); \ | 658 | trace_hardirqs_fixup(); \ |
694 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 659 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
695 | == NOTIFY_STOP) \ | 660 | == NOTIFY_STOP) \ |
696 | return; \ | 661 | return; \ |
697 | conditional_sti(regs); \ | 662 | conditional_sti(regs); \ |
698 | do_trap(trapnr, signr, str, regs, error_code, &info); \ | 663 | do_trap(trapnr, signr, str, regs, error_code, &info); \ |
699 | } | 664 | } |
700 | 665 | ||
701 | DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 666 | DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) |
702 | DO_ERROR( 4, SIGSEGV, "overflow", overflow) | 667 | DO_ERROR(4, SIGSEGV, "overflow", overflow) |
703 | DO_ERROR( 5, SIGSEGV, "bounds", bounds) | 668 | DO_ERROR(5, SIGSEGV, "bounds", bounds) |
704 | DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) | 669 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) |
705 | DO_ERROR( 7, SIGSEGV, "device not available", device_not_available) | 670 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
706 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | ||
707 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 671 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
708 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 672 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
709 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 673 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) |
710 | DO_ERROR(18, SIGSEGV, "reserved", reserved) | ||
711 | 674 | ||
712 | /* Runs on IST stack */ | 675 | /* Runs on IST stack */ |
713 | asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) | 676 | asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) |
@@ -737,31 +700,34 @@ asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
737 | die(str, regs, error_code); | 700 | die(str, regs, error_code); |
738 | } | 701 | } |
739 | 702 | ||
740 | asmlinkage void __kprobes do_general_protection(struct pt_regs * regs, | 703 | asmlinkage void __kprobes |
741 | long error_code) | 704 | do_general_protection(struct pt_regs *regs, long error_code) |
742 | { | 705 | { |
743 | struct task_struct *tsk = current; | 706 | struct task_struct *tsk; |
744 | 707 | ||
745 | conditional_sti(regs); | 708 | conditional_sti(regs); |
746 | 709 | ||
747 | if (user_mode(regs)) { | 710 | tsk = current; |
748 | tsk->thread.error_code = error_code; | 711 | if (!user_mode(regs)) |
749 | tsk->thread.trap_no = 13; | 712 | goto gp_in_kernel; |
750 | |||
751 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | ||
752 | printk_ratelimit()) { | ||
753 | printk(KERN_INFO | ||
754 | "%s[%d] general protection ip:%lx sp:%lx error:%lx", | ||
755 | tsk->comm, tsk->pid, | ||
756 | regs->ip, regs->sp, error_code); | ||
757 | print_vma_addr(" in ", regs->ip); | ||
758 | printk("\n"); | ||
759 | } | ||
760 | 713 | ||
761 | force_sig(SIGSEGV, tsk); | 714 | tsk->thread.error_code = error_code; |
762 | return; | 715 | tsk->thread.trap_no = 13; |
763 | } | 716 | |
717 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | ||
718 | printk_ratelimit()) { | ||
719 | printk(KERN_INFO | ||
720 | "%s[%d] general protection ip:%lx sp:%lx error:%lx", | ||
721 | tsk->comm, tsk->pid, | ||
722 | regs->ip, regs->sp, error_code); | ||
723 | print_vma_addr(" in ", regs->ip); | ||
724 | printk("\n"); | ||
725 | } | ||
726 | |||
727 | force_sig(SIGSEGV, tsk); | ||
728 | return; | ||
764 | 729 | ||
730 | gp_in_kernel: | ||
765 | if (fixup_exception(regs)) | 731 | if (fixup_exception(regs)) |
766 | return; | 732 | return; |
767 | 733 | ||
@@ -774,14 +740,14 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
774 | } | 740 | } |
775 | 741 | ||
776 | static notrace __kprobes void | 742 | static notrace __kprobes void |
777 | mem_parity_error(unsigned char reason, struct pt_regs * regs) | 743 | mem_parity_error(unsigned char reason, struct pt_regs *regs) |
778 | { | 744 | { |
779 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 745 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
780 | reason); | 746 | reason); |
781 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); | 747 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); |
782 | 748 | ||
783 | #if defined(CONFIG_EDAC) | 749 | #if defined(CONFIG_EDAC) |
784 | if(edac_handler_set()) { | 750 | if (edac_handler_set()) { |
785 | edac_atomic_assert_error(); | 751 | edac_atomic_assert_error(); |
786 | return; | 752 | return; |
787 | } | 753 | } |
@@ -798,7 +764,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
798 | } | 764 | } |
799 | 765 | ||
800 | static notrace __kprobes void | 766 | static notrace __kprobes void |
801 | io_check_error(unsigned char reason, struct pt_regs * regs) | 767 | io_check_error(unsigned char reason, struct pt_regs *regs) |
802 | { | 768 | { |
803 | printk("NMI: IOCK error (debug interrupt?)\n"); | 769 | printk("NMI: IOCK error (debug interrupt?)\n"); |
804 | show_registers(regs); | 770 | show_registers(regs); |
@@ -828,14 +794,14 @@ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
828 | 794 | ||
829 | /* Runs on IST stack. This code must keep interrupts off all the time. | 795 | /* Runs on IST stack. This code must keep interrupts off all the time. |
830 | Nested NMIs are prevented by the CPU. */ | 796 | Nested NMIs are prevented by the CPU. */ |
831 | asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) | 797 | asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) |
832 | { | 798 | { |
833 | unsigned char reason = 0; | 799 | unsigned char reason = 0; |
834 | int cpu; | 800 | int cpu; |
835 | 801 | ||
836 | cpu = smp_processor_id(); | 802 | cpu = smp_processor_id(); |
837 | 803 | ||
838 | /* Only the BSP gets external NMIs from the system. */ | 804 | /* Only the BSP gets external NMIs from the system. */ |
839 | if (!cpu) | 805 | if (!cpu) |
840 | reason = get_nmi_reason(); | 806 | reason = get_nmi_reason(); |
841 | 807 | ||
@@ -847,32 +813,57 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
847 | * Ok, so this is none of the documented NMI sources, | 813 | * Ok, so this is none of the documented NMI sources, |
848 | * so it must be the NMI watchdog. | 814 | * so it must be the NMI watchdog. |
849 | */ | 815 | */ |
850 | if (nmi_watchdog_tick(regs,reason)) | 816 | if (nmi_watchdog_tick(regs, reason)) |
851 | return; | 817 | return; |
852 | if (!do_nmi_callback(regs,cpu)) | 818 | if (!do_nmi_callback(regs, cpu)) |
853 | unknown_nmi_error(reason, regs); | 819 | unknown_nmi_error(reason, regs); |
854 | 820 | ||
855 | return; | 821 | return; |
856 | } | 822 | } |
857 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 823 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
858 | return; | 824 | return; |
859 | 825 | ||
860 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ | 826 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ |
861 | |||
862 | if (reason & 0x80) | 827 | if (reason & 0x80) |
863 | mem_parity_error(reason, regs); | 828 | mem_parity_error(reason, regs); |
864 | if (reason & 0x40) | 829 | if (reason & 0x40) |
865 | io_check_error(reason, regs); | 830 | io_check_error(reason, regs); |
866 | } | 831 | } |
867 | 832 | ||
833 | asmlinkage notrace __kprobes void | ||
834 | do_nmi(struct pt_regs *regs, long error_code) | ||
835 | { | ||
836 | nmi_enter(); | ||
837 | |||
838 | add_pda(__nmi_count, 1); | ||
839 | |||
840 | if (!ignore_nmis) | ||
841 | default_do_nmi(regs); | ||
842 | |||
843 | nmi_exit(); | ||
844 | } | ||
845 | |||
846 | void stop_nmi(void) | ||
847 | { | ||
848 | acpi_nmi_disable(); | ||
849 | ignore_nmis++; | ||
850 | } | ||
851 | |||
852 | void restart_nmi(void) | ||
853 | { | ||
854 | ignore_nmis--; | ||
855 | acpi_nmi_enable(); | ||
856 | } | ||
857 | |||
868 | /* runs on IST stack. */ | 858 | /* runs on IST stack. */ |
869 | asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) | 859 | asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
870 | { | 860 | { |
871 | trace_hardirqs_fixup(); | 861 | trace_hardirqs_fixup(); |
872 | 862 | ||
873 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { | 863 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) |
864 | == NOTIFY_STOP) | ||
874 | return; | 865 | return; |
875 | } | 866 | |
876 | preempt_conditional_sti(regs); | 867 | preempt_conditional_sti(regs); |
877 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); | 868 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); |
878 | preempt_conditional_cli(regs); | 869 | preempt_conditional_cli(regs); |
@@ -903,8 +894,8 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
903 | asmlinkage void __kprobes do_debug(struct pt_regs * regs, | 894 | asmlinkage void __kprobes do_debug(struct pt_regs * regs, |
904 | unsigned long error_code) | 895 | unsigned long error_code) |
905 | { | 896 | { |
906 | unsigned long condition; | ||
907 | struct task_struct *tsk = current; | 897 | struct task_struct *tsk = current; |
898 | unsigned long condition; | ||
908 | siginfo_t info; | 899 | siginfo_t info; |
909 | 900 | ||
910 | trace_hardirqs_fixup(); | 901 | trace_hardirqs_fixup(); |
@@ -925,21 +916,19 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
925 | 916 | ||
926 | /* Mask out spurious debug traps due to lazy DR7 setting */ | 917 | /* Mask out spurious debug traps due to lazy DR7 setting */ |
927 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { | 918 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { |
928 | if (!tsk->thread.debugreg7) { | 919 | if (!tsk->thread.debugreg7) |
929 | goto clear_dr7; | 920 | goto clear_dr7; |
930 | } | ||
931 | } | 921 | } |
932 | 922 | ||
933 | tsk->thread.debugreg6 = condition; | 923 | tsk->thread.debugreg6 = condition; |
934 | 924 | ||
935 | |||
936 | /* | 925 | /* |
937 | * Single-stepping through TF: make sure we ignore any events in | 926 | * Single-stepping through TF: make sure we ignore any events in |
938 | * kernel space (but re-enable TF when returning to user mode). | 927 | * kernel space (but re-enable TF when returning to user mode). |
939 | */ | 928 | */ |
940 | if (condition & DR_STEP) { | 929 | if (condition & DR_STEP) { |
941 | if (!user_mode(regs)) | 930 | if (!user_mode(regs)) |
942 | goto clear_TF_reenable; | 931 | goto clear_TF_reenable; |
943 | } | 932 | } |
944 | 933 | ||
945 | /* Ok, finally something we can handle */ | 934 | /* Ok, finally something we can handle */ |
@@ -952,7 +941,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
952 | force_sig_info(SIGTRAP, &info, tsk); | 941 | force_sig_info(SIGTRAP, &info, tsk); |
953 | 942 | ||
954 | clear_dr7: | 943 | clear_dr7: |
955 | set_debugreg(0UL, 7); | 944 | set_debugreg(0, 7); |
956 | preempt_conditional_cli(regs); | 945 | preempt_conditional_cli(regs); |
957 | return; | 946 | return; |
958 | 947 | ||
@@ -960,6 +949,7 @@ clear_TF_reenable:
960 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | 949 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); |
961 | regs->flags &= ~X86_EFLAGS_TF; | 950 | regs->flags &= ~X86_EFLAGS_TF; |
962 | preempt_conditional_cli(regs); | 951 | preempt_conditional_cli(regs); |
952 | return; | ||
963 | } | 953 | } |
964 | 954 | ||
965 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | 955 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) |
@@ -982,7 +972,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
982 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) | 972 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) |
983 | { | 973 | { |
984 | void __user *ip = (void __user *)(regs->ip); | 974 | void __user *ip = (void __user *)(regs->ip); |
985 | struct task_struct * task; | 975 | struct task_struct *task; |
986 | siginfo_t info; | 976 | siginfo_t info; |
987 | unsigned short cwd, swd; | 977 | unsigned short cwd, swd; |
988 | 978 | ||
@@ -1015,30 +1005,30 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1015 | cwd = get_fpu_cwd(task); | 1005 | cwd = get_fpu_cwd(task); |
1016 | swd = get_fpu_swd(task); | 1006 | swd = get_fpu_swd(task); |
1017 | switch (swd & ~cwd & 0x3f) { | 1007 | switch (swd & ~cwd & 0x3f) { |
1018 | case 0x000: | 1008 | case 0x000: /* No unmasked exception */ |
1019 | default: | 1009 | default: /* Multiple exceptions */ |
1020 | break; | 1010 | break; |
1021 | case 0x001: /* Invalid Op */ | 1011 | case 0x001: /* Invalid Op */ |
1022 | /* | 1012 | /* |
1023 | * swd & 0x240 == 0x040: Stack Underflow | 1013 | * swd & 0x240 == 0x040: Stack Underflow |
1024 | * swd & 0x240 == 0x240: Stack Overflow | 1014 | * swd & 0x240 == 0x240: Stack Overflow |
1025 | * User must clear the SF bit (0x40) if set | 1015 | * User must clear the SF bit (0x40) if set |
1026 | */ | 1016 | */ |
1027 | info.si_code = FPE_FLTINV; | 1017 | info.si_code = FPE_FLTINV; |
1028 | break; | 1018 | break; |
1029 | case 0x002: /* Denormalize */ | 1019 | case 0x002: /* Denormalize */ |
1030 | case 0x010: /* Underflow */ | 1020 | case 0x010: /* Underflow */ |
1031 | info.si_code = FPE_FLTUND; | 1021 | info.si_code = FPE_FLTUND; |
1032 | break; | 1022 | break; |
1033 | case 0x004: /* Zero Divide */ | 1023 | case 0x004: /* Zero Divide */ |
1034 | info.si_code = FPE_FLTDIV; | 1024 | info.si_code = FPE_FLTDIV; |
1035 | break; | 1025 | break; |
1036 | case 0x008: /* Overflow */ | 1026 | case 0x008: /* Overflow */ |
1037 | info.si_code = FPE_FLTOVF; | 1027 | info.si_code = FPE_FLTOVF; |
1038 | break; | 1028 | break; |
1039 | case 0x020: /* Precision */ | 1029 | case 0x020: /* Precision */ |
1040 | info.si_code = FPE_FLTRES; | 1030 | info.si_code = FPE_FLTRES; |
1041 | break; | 1031 | break; |
1042 | } | 1032 | } |
1043 | force_sig_info(SIGFPE, &info, task); | 1033 | force_sig_info(SIGFPE, &info, task); |
1044 | } | 1034 | } |
@@ -1051,7 +1041,7 @@ asmlinkage void bad_intr(void)
1051 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1041 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
1052 | { | 1042 | { |
1053 | void __user *ip = (void __user *)(regs->ip); | 1043 | void __user *ip = (void __user *)(regs->ip); |
1054 | struct task_struct * task; | 1044 | struct task_struct *task; |
1055 | siginfo_t info; | 1045 | siginfo_t info; |
1056 | unsigned short mxcsr; | 1046 | unsigned short mxcsr; |
1057 | 1047 | ||
@@ -1079,25 +1069,25 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1079 | */ | 1069 | */ |
1080 | mxcsr = get_fpu_mxcsr(task); | 1070 | mxcsr = get_fpu_mxcsr(task); |
1081 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { | 1071 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { |
1082 | case 0x000: | 1072 | case 0x000: |
1083 | default: | 1073 | default: |
1084 | break; | 1074 | break; |
1085 | case 0x001: /* Invalid Op */ | 1075 | case 0x001: /* Invalid Op */ |
1086 | info.si_code = FPE_FLTINV; | 1076 | info.si_code = FPE_FLTINV; |
1087 | break; | 1077 | break; |
1088 | case 0x002: /* Denormalize */ | 1078 | case 0x002: /* Denormalize */ |
1089 | case 0x010: /* Underflow */ | 1079 | case 0x010: /* Underflow */ |
1090 | info.si_code = FPE_FLTUND; | 1080 | info.si_code = FPE_FLTUND; |
1091 | break; | 1081 | break; |
1092 | case 0x004: /* Zero Divide */ | 1082 | case 0x004: /* Zero Divide */ |
1093 | info.si_code = FPE_FLTDIV; | 1083 | info.si_code = FPE_FLTDIV; |
1094 | break; | 1084 | break; |
1095 | case 0x008: /* Overflow */ | 1085 | case 0x008: /* Overflow */ |
1096 | info.si_code = FPE_FLTOVF; | 1086 | info.si_code = FPE_FLTOVF; |
1097 | break; | 1087 | break; |
1098 | case 0x020: /* Precision */ | 1088 | case 0x020: /* Precision */ |
1099 | info.si_code = FPE_FLTRES; | 1089 | info.si_code = FPE_FLTRES; |
1100 | break; | 1090 | break; |
1101 | } | 1091 | } |
1102 | force_sig_info(SIGFPE, &info, task); | 1092 | force_sig_info(SIGFPE, &info, task); |
1103 | } | 1093 | } |
@@ -1115,7 +1105,7 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
1115 | } | 1105 | } |
1116 | 1106 | ||
1117 | /* | 1107 | /* |
1118 | * 'math_state_restore()' saves the current math information in the | 1108 | * 'math_state_restore()' saves the current math information in the |
1119 | * old math state array, and gets the new ones from the current task | 1109 | * old math state array, and gets the new ones from the current task |
1120 | * | 1110 | * |
1121 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 1111 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
@@ -1140,7 +1130,7 @@ asmlinkage void math_state_restore(void)
1140 | local_irq_disable(); | 1130 | local_irq_disable(); |
1141 | } | 1131 | } |
1142 | 1132 | ||
1143 | clts(); /* Allow maths ops (or we recurse) */ | 1133 | clts(); /* Allow maths ops (or we recurse) */ |
1144 | restore_fpu_checking(&me->thread.xstate->fxsave); | 1134 | restore_fpu_checking(&me->thread.xstate->fxsave); |
1145 | task_thread_info(me)->status |= TS_USEDFPU; | 1135 | task_thread_info(me)->status |= TS_USEDFPU; |
1146 | me->fpu_counter++; | 1136 | me->fpu_counter++; |
@@ -1149,64 +1139,61 @@ EXPORT_SYMBOL_GPL(math_state_restore);
1149 | 1139 | ||
1150 | void __init trap_init(void) | 1140 | void __init trap_init(void) |
1151 | { | 1141 | { |
1152 | set_intr_gate(0,&divide_error); | 1142 | set_intr_gate(0, &divide_error); |
1153 | set_intr_gate_ist(1,&debug,DEBUG_STACK); | 1143 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1154 | set_intr_gate_ist(2,&nmi,NMI_STACK); | 1144 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1155 | set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */ | 1145 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ |
1156 | set_system_gate(4,&overflow); /* int4 can be called from all */ | 1146 | set_system_gate(4, &overflow); /* int4 can be called from all */ |
1157 | set_intr_gate(5,&bounds); | 1147 | set_intr_gate(5, &bounds); |
1158 | set_intr_gate(6,&invalid_op); | 1148 | set_intr_gate(6, &invalid_op); |
1159 | set_intr_gate(7,&device_not_available); | 1149 | set_intr_gate(7, &device_not_available); |
1160 | set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK); | 1150 | set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); |
1161 | set_intr_gate(9,&coprocessor_segment_overrun); | 1151 | set_intr_gate(9, &coprocessor_segment_overrun); |
1162 | set_intr_gate(10,&invalid_TSS); | 1152 | set_intr_gate(10, &invalid_TSS); |
1163 | set_intr_gate(11,&segment_not_present); | 1153 | set_intr_gate(11, &segment_not_present); |
1164 | set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK); | 1154 | set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); |
1165 | set_intr_gate(13,&general_protection); | 1155 | set_intr_gate(13, &general_protection); |
1166 | set_intr_gate(14,&page_fault); | 1156 | set_intr_gate(14, &page_fault); |
1167 | set_intr_gate(15,&spurious_interrupt_bug); | 1157 | set_intr_gate(15, &spurious_interrupt_bug); |
1168 | set_intr_gate(16,&coprocessor_error); | 1158 | set_intr_gate(16, &coprocessor_error); |
1169 | set_intr_gate(17,&alignment_check); | 1159 | set_intr_gate(17, &alignment_check); |
1170 | #ifdef CONFIG_X86_MCE | 1160 | #ifdef CONFIG_X86_MCE |
1171 | set_intr_gate_ist(18,&machine_check, MCE_STACK); | 1161 | set_intr_gate_ist(18, &machine_check, MCE_STACK); |
1172 | #endif | 1162 | #endif |
1173 | set_intr_gate(19,&simd_coprocessor_error); | 1163 | set_intr_gate(19, &simd_coprocessor_error); |
1174 | 1164 | ||
1175 | #ifdef CONFIG_IA32_EMULATION | 1165 | #ifdef CONFIG_IA32_EMULATION |
1176 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1166 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
1177 | #endif | 1167 | #endif |
1178 | |||
1179 | /* | 1168 | /* |
1180 | * initialize the per thread extended state: | 1169 | * initialize the per thread extended state: |
1181 | */ | 1170 | */ |
1182 | init_thread_xstate(); | 1171 | init_thread_xstate(); |
1183 | /* | 1172 | /* |
1184 | * Should be a barrier for any external CPU state. | 1173 | * Should be a barrier for any external CPU state: |
1185 | */ | 1174 | */ |
1186 | cpu_init(); | 1175 | cpu_init(); |
1187 | } | 1176 | } |
1188 | 1177 | ||
1189 | |||
1190 | static int __init oops_setup(char *s) | 1178 | static int __init oops_setup(char *s) |
1191 | { | 1179 | { |
1192 | if (!s) | 1180 | if (!s) |
1193 | return -EINVAL; | 1181 | return -EINVAL; |
1194 | if (!strcmp(s, "panic")) | 1182 | if (!strcmp(s, "panic")) |
1195 | panic_on_oops = 1; | 1183 | panic_on_oops = 1; |
1196 | return 0; | 1184 | return 0; |
1197 | } | 1185 | } |
1198 | early_param("oops", oops_setup); | 1186 | early_param("oops", oops_setup); |
1199 | 1187 | ||
1200 | static int __init kstack_setup(char *s) | 1188 | static int __init kstack_setup(char *s) |
1201 | { | 1189 | { |
1202 | if (!s) | 1190 | if (!s) |
1203 | return -EINVAL; | 1191 | return -EINVAL; |
1204 | kstack_depth_to_print = simple_strtoul(s,NULL,0); | 1192 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); |
1205 | return 0; | 1193 | return 0; |
1206 | } | 1194 | } |
1207 | early_param("kstack", kstack_setup); | 1195 | early_param("kstack", kstack_setup); |
1208 | 1196 | ||
1209 | |||
1210 | static int __init code_bytes_setup(char *s) | 1197 | static int __init code_bytes_setup(char *s) |
1211 | { | 1198 | { |
1212 | code_bytes = simple_strtoul(s, NULL, 0); | 1199 | code_bytes = simple_strtoul(s, NULL, 0); |