author    Ingo Molnar <mingo@elte.hu>    2008-07-18 13:53:16 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-18 13:53:16 -0400
commit    9b610fda0df5d0f0b0c64242e37441ad1b384aac (patch)
tree      0ea14b15f2e6546f37fe18d8ac3dc83077ec0e55 /arch/x86/kernel/traps_64.c
parent    b8f8c3cf0a4ac0632ec3f0e15e9dc0c29de917af (diff)
parent    5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into timers/nohz
Diffstat (limited to 'arch/x86/kernel/traps_64.c')
-rw-r--r--  arch/x86/kernel/traps_64.c | 541
1 file changed, 270 insertions, 271 deletions
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index adff76ea97c4..2696a6837782 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -10,49 +10,49 @@
10 * 'Traps.c' handles hardware traps and faults after we have saved some 10 * 'Traps.c' handles hardware traps and faults after we have saved some
11 * state in 'entry.S'. 11 * state in 'entry.S'.
12 */ 12 */
13#include <linux/sched.h> 13#include <linux/moduleparam.h>
14#include <linux/interrupt.h>
15#include <linux/kallsyms.h>
16#include <linux/spinlock.h>
17#include <linux/kprobes.h>
18#include <linux/uaccess.h>
19#include <linux/utsname.h>
20#include <linux/kdebug.h>
14#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/ptrace.h>
15#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/unwind.h>
26#include <linux/delay.h>
16#include <linux/errno.h> 27#include <linux/errno.h>
17#include <linux/ptrace.h> 28#include <linux/kexec.h>
29#include <linux/sched.h>
18#include <linux/timer.h> 30#include <linux/timer.h>
19#include <linux/mm.h>
20#include <linux/init.h> 31#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/kallsyms.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/nmi.h>
28#include <linux/kprobes.h>
29#include <linux/kexec.h>
30#include <linux/unwind.h>
31#include <linux/uaccess.h>
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/kdebug.h> 33#include <linux/nmi.h>
34#include <linux/utsname.h> 34#include <linux/mm.h>
35
36#include <mach_traps.h>
37 35
38#if defined(CONFIG_EDAC) 36#if defined(CONFIG_EDAC)
39#include <linux/edac.h> 37#include <linux/edac.h>
40#endif 38#endif
41 39
42#include <asm/system.h> 40#include <asm/stacktrace.h>
43#include <asm/io.h> 41#include <asm/processor.h>
44#include <asm/atomic.h>
45#include <asm/debugreg.h> 42#include <asm/debugreg.h>
43#include <asm/atomic.h>
44#include <asm/system.h>
45#include <asm/unwind.h>
46#include <asm/desc.h> 46#include <asm/desc.h>
47#include <asm/i387.h> 47#include <asm/i387.h>
48#include <asm/processor.h> 48#include <asm/nmi.h>
49#include <asm/unwind.h>
50#include <asm/smp.h> 49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h> 51#include <asm/pgalloc.h>
52#include <asm/pda.h>
53#include <asm/proto.h> 52#include <asm/proto.h>
54#include <asm/nmi.h> 53#include <asm/pda.h>
55#include <asm/stacktrace.h> 54
55#include <mach_traps.h>
56 56
57asmlinkage void divide_error(void); 57asmlinkage void divide_error(void);
58asmlinkage void debug(void); 58asmlinkage void debug(void);
@@ -71,12 +71,15 @@ asmlinkage void general_protection(void);
71asmlinkage void page_fault(void); 71asmlinkage void page_fault(void);
72asmlinkage void coprocessor_error(void); 72asmlinkage void coprocessor_error(void);
73asmlinkage void simd_coprocessor_error(void); 73asmlinkage void simd_coprocessor_error(void);
74asmlinkage void reserved(void);
75asmlinkage void alignment_check(void); 74asmlinkage void alignment_check(void);
76asmlinkage void machine_check(void);
77asmlinkage void spurious_interrupt_bug(void); 75asmlinkage void spurious_interrupt_bug(void);
76asmlinkage void machine_check(void);
78 77
78int panic_on_unrecovered_nmi;
79int kstack_depth_to_print = 12;
79static unsigned int code_bytes = 64; 80static unsigned int code_bytes = 64;
81static int ignore_nmis;
82static int die_counter;
80 83
81static inline void conditional_sti(struct pt_regs *regs) 84static inline void conditional_sti(struct pt_regs *regs)
82{ 85{
@@ -100,34 +103,9 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
100 dec_preempt_count(); 103 dec_preempt_count();
101} 104}
102 105
103int kstack_depth_to_print = 12;
104
105void printk_address(unsigned long address, int reliable) 106void printk_address(unsigned long address, int reliable)
106{ 107{
107#ifdef CONFIG_KALLSYMS 108 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address);
108 unsigned long offset = 0, symsize;
109 const char *symname;
110 char *modname;
111 char *delim = ":";
112 char namebuf[KSYM_NAME_LEN];
113 char reliab[4] = "";
114
115 symname = kallsyms_lookup(address, &symsize, &offset,
116 &modname, namebuf);
117 if (!symname) {
118 printk(" [<%016lx>]\n", address);
119 return;
120 }
121 if (!reliable)
122 strcpy(reliab, "? ");
123
124 if (!modname)
125 modname = delim = "";
126 printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
127 address, reliab, delim, modname, delim, symname, offset, symsize);
128#else
129 printk(" [<%016lx>]\n", address);
130#endif
131} 109}
132 110
133static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 111static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -204,8 +182,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
204 return NULL; 182 return NULL;
205} 183}
206 184
207#define MSG(txt) ops->warning(data, txt)
208
209/* 185/*
210 * x86-64 can have up to three kernel stacks: 186 * x86-64 can have up to three kernel stacks:
211 * process stack 187 * process stack
@@ -232,11 +208,11 @@ struct stack_frame {
232 unsigned long return_address; 208 unsigned long return_address;
233}; 209};
234 210
235 211static inline unsigned long
236static inline unsigned long print_context_stack(struct thread_info *tinfo, 212print_context_stack(struct thread_info *tinfo,
237 unsigned long *stack, unsigned long bp, 213 unsigned long *stack, unsigned long bp,
238 const struct stacktrace_ops *ops, void *data, 214 const struct stacktrace_ops *ops, void *data,
239 unsigned long *end) 215 unsigned long *end)
240{ 216{
241 struct stack_frame *frame = (struct stack_frame *)bp; 217 struct stack_frame *frame = (struct stack_frame *)bp;
242 218
@@ -258,7 +234,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
258 return bp; 234 return bp;
259} 235}
260 236
261void dump_trace(struct task_struct *tsk, struct pt_regs *regs, 237void dump_trace(struct task_struct *task, struct pt_regs *regs,
262 unsigned long *stack, unsigned long bp, 238 unsigned long *stack, unsigned long bp,
263 const struct stacktrace_ops *ops, void *data) 239 const struct stacktrace_ops *ops, void *data)
264{ 240{
@@ -267,36 +243,34 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
267 unsigned used = 0; 243 unsigned used = 0;
268 struct thread_info *tinfo; 244 struct thread_info *tinfo;
269 245
270 if (!tsk) 246 if (!task)
271 tsk = current; 247 task = current;
272 tinfo = task_thread_info(tsk);
273 248
274 if (!stack) { 249 if (!stack) {
275 unsigned long dummy; 250 unsigned long dummy;
276 stack = &dummy; 251 stack = &dummy;
277 if (tsk && tsk != current) 252 if (task && task != current)
278 stack = (unsigned long *)tsk->thread.sp; 253 stack = (unsigned long *)task->thread.sp;
279 } 254 }
280 255
281#ifdef CONFIG_FRAME_POINTER 256#ifdef CONFIG_FRAME_POINTER
282 if (!bp) { 257 if (!bp) {
283 if (tsk == current) { 258 if (task == current) {
284 /* Grab bp right from our regs */ 259 /* Grab bp right from our regs */
285 asm("movq %%rbp, %0" : "=r" (bp):); 260 asm("movq %%rbp, %0" : "=r" (bp) :);
286 } else { 261 } else {
287 /* bp is the last reg pushed by switch_to */ 262 /* bp is the last reg pushed by switch_to */
288 bp = *(unsigned long *) tsk->thread.sp; 263 bp = *(unsigned long *) task->thread.sp;
289 } 264 }
290 } 265 }
291#endif 266#endif
292 267
293
294
295 /* 268 /*
296 * Print function call entries in all stacks, starting at the 269 * Print function call entries in all stacks, starting at the
297 * current stack address. If the stacks consist of nested 270 * current stack address. If the stacks consist of nested
298 * exceptions 271 * exceptions
299 */ 272 */
273 tinfo = task_thread_info(task);
300 for (;;) { 274 for (;;) {
301 char *id; 275 char *id;
302 unsigned long *estack_end; 276 unsigned long *estack_end;
@@ -381,18 +355,17 @@ static const struct stacktrace_ops print_trace_ops = {
381 .address = print_trace_address, 355 .address = print_trace_address,
382}; 356};
383 357
384void 358void show_trace(struct task_struct *task, struct pt_regs *regs,
385show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, 359 unsigned long *stack, unsigned long bp)
386 unsigned long bp)
387{ 360{
388 printk("\nCall Trace:\n"); 361 printk("\nCall Trace:\n");
389 dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL); 362 dump_trace(task, regs, stack, bp, &print_trace_ops, NULL);
390 printk("\n"); 363 printk("\n");
391} 364}
392 365
393static void 366static void
394_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, 367_show_stack(struct task_struct *task, struct pt_regs *regs,
395 unsigned long bp) 368 unsigned long *sp, unsigned long bp)
396{ 369{
397 unsigned long *stack; 370 unsigned long *stack;
398 int i; 371 int i;
@@ -404,14 +377,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
404 // back trace for this cpu. 377 // back trace for this cpu.
405 378
406 if (sp == NULL) { 379 if (sp == NULL) {
407 if (tsk) 380 if (task)
408 sp = (unsigned long *)tsk->thread.sp; 381 sp = (unsigned long *)task->thread.sp;
409 else 382 else
410 sp = (unsigned long *)&sp; 383 sp = (unsigned long *)&sp;
411 } 384 }
412 385
413 stack = sp; 386 stack = sp;
414 for(i=0; i < kstack_depth_to_print; i++) { 387 for (i = 0; i < kstack_depth_to_print; i++) {
415 if (stack >= irqstack && stack <= irqstack_end) { 388 if (stack >= irqstack && stack <= irqstack_end) {
416 if (stack == irqstack_end) { 389 if (stack == irqstack_end) {
417 stack = (unsigned long *) (irqstack_end[-1]); 390 stack = (unsigned long *) (irqstack_end[-1]);
@@ -426,12 +399,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
426 printk(" %016lx", *stack++); 399 printk(" %016lx", *stack++);
427 touch_nmi_watchdog(); 400 touch_nmi_watchdog();
428 } 401 }
429 show_trace(tsk, regs, sp, bp); 402 show_trace(task, regs, sp, bp);
430} 403}
431 404
432void show_stack(struct task_struct *tsk, unsigned long * sp) 405void show_stack(struct task_struct *task, unsigned long *sp)
433{ 406{
434 _show_stack(tsk, NULL, sp, 0); 407 _show_stack(task, NULL, sp, 0);
435} 408}
436 409
437/* 410/*
@@ -439,8 +412,8 @@ void show_stack(struct task_struct *tsk, unsigned long * sp)
439 */ 412 */
440void dump_stack(void) 413void dump_stack(void)
441{ 414{
442 unsigned long dummy;
443 unsigned long bp = 0; 415 unsigned long bp = 0;
416 unsigned long stack;
444 417
445#ifdef CONFIG_FRAME_POINTER 418#ifdef CONFIG_FRAME_POINTER
446 if (!bp) 419 if (!bp)
@@ -452,7 +425,7 @@ void dump_stack(void)
452 init_utsname()->release, 425 init_utsname()->release,
453 (int)strcspn(init_utsname()->version, " "), 426 (int)strcspn(init_utsname()->version, " "),
454 init_utsname()->version); 427 init_utsname()->version);
455 show_trace(NULL, NULL, &dummy, bp); 428 show_trace(NULL, NULL, &stack, bp);
456} 429}
457 430
458EXPORT_SYMBOL(dump_stack); 431EXPORT_SYMBOL(dump_stack);
@@ -463,12 +436,8 @@ void show_registers(struct pt_regs *regs)
463 unsigned long sp; 436 unsigned long sp;
464 const int cpu = smp_processor_id(); 437 const int cpu = smp_processor_id();
465 struct task_struct *cur = cpu_pda(cpu)->pcurrent; 438 struct task_struct *cur = cpu_pda(cpu)->pcurrent;
466 u8 *ip;
467 unsigned int code_prologue = code_bytes * 43 / 64;
468 unsigned int code_len = code_bytes;
469 439
470 sp = regs->sp; 440 sp = regs->sp;
471 ip = (u8 *) regs->ip - code_prologue;
472 printk("CPU %d ", cpu); 441 printk("CPU %d ", cpu);
473 __show_regs(regs); 442 __show_regs(regs);
474 printk("Process %s (pid: %d, threadinfo %p, task %p)\n", 443 printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
@@ -479,15 +448,21 @@ void show_registers(struct pt_regs *regs)
479 * time of the fault.. 448 * time of the fault..
480 */ 449 */
481 if (!user_mode(regs)) { 450 if (!user_mode(regs)) {
451 unsigned int code_prologue = code_bytes * 43 / 64;
452 unsigned int code_len = code_bytes;
482 unsigned char c; 453 unsigned char c;
454 u8 *ip;
455
483 printk("Stack: "); 456 printk("Stack: ");
484 _show_stack(NULL, regs, (unsigned long *)sp, regs->bp); 457 _show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
485 printk("\n"); 458 printk("\n");
486 459
487 printk(KERN_EMERG "Code: "); 460 printk(KERN_EMERG "Code: ");
461
462 ip = (u8 *)regs->ip - code_prologue;
488 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { 463 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
489 /* try starting at RIP */ 464 /* try starting at RIP */
490 ip = (u8 *) regs->ip; 465 ip = (u8 *)regs->ip;
491 code_len = code_len - code_prologue + 1; 466 code_len = code_len - code_prologue + 1;
492 } 467 }
493 for (i = 0; i < code_len; i++, ip++) { 468 for (i = 0; i < code_len; i++, ip++) {
@@ -503,7 +478,7 @@ void show_registers(struct pt_regs *regs)
503 } 478 }
504 } 479 }
505 printk("\n"); 480 printk("\n");
506} 481}
507 482
508int is_valid_bugaddr(unsigned long ip) 483int is_valid_bugaddr(unsigned long ip)
509{ 484{
@@ -561,10 +536,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
561 do_exit(signr); 536 do_exit(signr);
562} 537}
563 538
564int __kprobes __die(const char * str, struct pt_regs * regs, long err) 539int __kprobes __die(const char *str, struct pt_regs *regs, long err)
565{ 540{
566 static int die_counter; 541 printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
567 printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
568#ifdef CONFIG_PREEMPT 542#ifdef CONFIG_PREEMPT
569 printk("PREEMPT "); 543 printk("PREEMPT ");
570#endif 544#endif
@@ -575,8 +549,10 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
575 printk("DEBUG_PAGEALLOC"); 549 printk("DEBUG_PAGEALLOC");
576#endif 550#endif
577 printk("\n"); 551 printk("\n");
578 if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) 552 if (notify_die(DIE_OOPS, str, regs, err,
553 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
579 return 1; 554 return 1;
555
580 show_registers(regs); 556 show_registers(regs);
581 add_taint(TAINT_DIE); 557 add_taint(TAINT_DIE);
582 /* Executive summary in case the oops scrolled away */ 558 /* Executive summary in case the oops scrolled away */
@@ -588,7 +564,7 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
588 return 0; 564 return 0;
589} 565}
590 566
591void die(const char * str, struct pt_regs * regs, long err) 567void die(const char *str, struct pt_regs *regs, long err)
592{ 568{
593 unsigned long flags = oops_begin(); 569 unsigned long flags = oops_begin();
594 570
@@ -605,8 +581,7 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
605{ 581{
606 unsigned long flags; 582 unsigned long flags;
607 583
608 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == 584 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
609 NOTIFY_STOP)
610 return; 585 return;
611 586
612 flags = oops_begin(); 587 flags = oops_begin();
@@ -614,7 +589,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
614 * We are in trouble anyway, lets at least try 589 * We are in trouble anyway, lets at least try
615 * to get a message out. 590 * to get a message out.
616 */ 591 */
617 printk(str, smp_processor_id()); 592 printk(KERN_EMERG "%s", str);
593 printk(" on CPU%d, ip %08lx, registers:\n",
594 smp_processor_id(), regs->ip);
618 show_registers(regs); 595 show_registers(regs);
619 if (kexec_should_crash(current)) 596 if (kexec_should_crash(current))
620 crash_kexec(regs); 597 crash_kexec(regs);
@@ -626,44 +603,44 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
626 do_exit(SIGBUS); 603 do_exit(SIGBUS);
627} 604}
628 605
629static void __kprobes do_trap(int trapnr, int signr, char *str, 606static void __kprobes
630 struct pt_regs * regs, long error_code, 607do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
631 siginfo_t *info) 608 long error_code, siginfo_t *info)
632{ 609{
633 struct task_struct *tsk = current; 610 struct task_struct *tsk = current;
634 611
635 if (user_mode(regs)) { 612 if (!user_mode(regs))
636 /* 613 goto kernel_trap;
637 * We want error_code and trap_no set for userspace
638 * faults and kernelspace faults which result in
639 * die(), but not kernelspace faults which are fixed
640 * up. die() gives the process no chance to handle
641 * the signal and notice the kernel fault information,
642 * so that won't result in polluting the information
643 * about previously queued, but not yet delivered,
644 * faults. See also do_general_protection below.
645 */
646 tsk->thread.error_code = error_code;
647 tsk->thread.trap_no = trapnr;
648
649 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
650 printk_ratelimit()) {
651 printk(KERN_INFO
652 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
653 tsk->comm, tsk->pid, str,
654 regs->ip, regs->sp, error_code);
655 print_vma_addr(" in ", regs->ip);
656 printk("\n");
657 }
658 614
659 if (info) 615 /*
660 force_sig_info(signr, info, tsk); 616 * We want error_code and trap_no set for userspace faults and
661 else 617 * kernelspace faults which result in die(), but not
662 force_sig(signr, tsk); 618 * kernelspace faults which are fixed up. die() gives the
663 return; 619 * process no chance to handle the signal and notice the
620 * kernel fault information, so that won't result in polluting
621 * the information about previously queued, but not yet
622 * delivered, faults. See also do_general_protection below.
623 */
624 tsk->thread.error_code = error_code;
625 tsk->thread.trap_no = trapnr;
626
627 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
628 printk_ratelimit()) {
629 printk(KERN_INFO
630 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
631 tsk->comm, tsk->pid, str,
632 regs->ip, regs->sp, error_code);
633 print_vma_addr(" in ", regs->ip);
634 printk("\n");
664 } 635 }
665 636
637 if (info)
638 force_sig_info(signr, info, tsk);
639 else
640 force_sig(signr, tsk);
641 return;
666 642
643kernel_trap:
667 if (!fixup_exception(regs)) { 644 if (!fixup_exception(regs)) {
668 tsk->thread.error_code = error_code; 645 tsk->thread.error_code = error_code;
669 tsk->thread.trap_no = trapnr; 646 tsk->thread.trap_no = trapnr;
@@ -673,41 +650,39 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
673} 650}
674 651
675#define DO_ERROR(trapnr, signr, str, name) \ 652#define DO_ERROR(trapnr, signr, str, name) \
676asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 653asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
677{ \ 654{ \
678 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 655 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
679 == NOTIFY_STOP) \ 656 == NOTIFY_STOP) \
680 return; \ 657 return; \
681 conditional_sti(regs); \ 658 conditional_sti(regs); \
682 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 659 do_trap(trapnr, signr, str, regs, error_code, NULL); \
683} 660}
684 661
685#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 662#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
686asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 663asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
687{ \ 664{ \
688 siginfo_t info; \ 665 siginfo_t info; \
689 info.si_signo = signr; \ 666 info.si_signo = signr; \
690 info.si_errno = 0; \ 667 info.si_errno = 0; \
691 info.si_code = sicode; \ 668 info.si_code = sicode; \
692 info.si_addr = (void __user *)siaddr; \ 669 info.si_addr = (void __user *)siaddr; \
693 trace_hardirqs_fixup(); \ 670 trace_hardirqs_fixup(); \
694 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 671 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
695 == NOTIFY_STOP) \ 672 == NOTIFY_STOP) \
696 return; \ 673 return; \
697 conditional_sti(regs); \ 674 conditional_sti(regs); \
698 do_trap(trapnr, signr, str, regs, error_code, &info); \ 675 do_trap(trapnr, signr, str, regs, error_code, &info); \
699} 676}
700 677
701DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 678DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
702DO_ERROR( 4, SIGSEGV, "overflow", overflow) 679DO_ERROR(4, SIGSEGV, "overflow", overflow)
703DO_ERROR( 5, SIGSEGV, "bounds", bounds) 680DO_ERROR(5, SIGSEGV, "bounds", bounds)
704DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 681DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
705DO_ERROR( 7, SIGSEGV, "device not available", device_not_available) 682DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
706DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
707DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 683DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
708DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 684DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
709DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 685DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
710DO_ERROR(18, SIGSEGV, "reserved", reserved)
711 686
712/* Runs on IST stack */ 687/* Runs on IST stack */
713asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) 688asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
@@ -737,31 +712,34 @@ asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
737 die(str, regs, error_code); 712 die(str, regs, error_code);
738} 713}
739 714
740asmlinkage void __kprobes do_general_protection(struct pt_regs * regs, 715asmlinkage void __kprobes
741 long error_code) 716do_general_protection(struct pt_regs *regs, long error_code)
742{ 717{
743 struct task_struct *tsk = current; 718 struct task_struct *tsk;
744 719
745 conditional_sti(regs); 720 conditional_sti(regs);
746 721
747 if (user_mode(regs)) { 722 tsk = current;
748 tsk->thread.error_code = error_code; 723 if (!user_mode(regs))
749 tsk->thread.trap_no = 13; 724 goto gp_in_kernel;
750
751 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
752 printk_ratelimit()) {
753 printk(KERN_INFO
754 "%s[%d] general protection ip:%lx sp:%lx error:%lx",
755 tsk->comm, tsk->pid,
756 regs->ip, regs->sp, error_code);
757 print_vma_addr(" in ", regs->ip);
758 printk("\n");
759 }
760 725
761 force_sig(SIGSEGV, tsk); 726 tsk->thread.error_code = error_code;
762 return; 727 tsk->thread.trap_no = 13;
763 } 728
729 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
730 printk_ratelimit()) {
731 printk(KERN_INFO
732 "%s[%d] general protection ip:%lx sp:%lx error:%lx",
733 tsk->comm, tsk->pid,
734 regs->ip, regs->sp, error_code);
735 print_vma_addr(" in ", regs->ip);
736 printk("\n");
737 }
764 738
739 force_sig(SIGSEGV, tsk);
740 return;
741
742gp_in_kernel:
765 if (fixup_exception(regs)) 743 if (fixup_exception(regs))
766 return; 744 return;
767 745
@@ -774,14 +752,14 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
774} 752}
775 753
776static notrace __kprobes void 754static notrace __kprobes void
777mem_parity_error(unsigned char reason, struct pt_regs * regs) 755mem_parity_error(unsigned char reason, struct pt_regs *regs)
778{ 756{
779 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", 757 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
780 reason); 758 reason);
781 printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); 759 printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
782 760
783#if defined(CONFIG_EDAC) 761#if defined(CONFIG_EDAC)
784 if(edac_handler_set()) { 762 if (edac_handler_set()) {
785 edac_atomic_assert_error(); 763 edac_atomic_assert_error();
786 return; 764 return;
787 } 765 }
@@ -798,7 +776,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
798} 776}
799 777
800static notrace __kprobes void 778static notrace __kprobes void
801io_check_error(unsigned char reason, struct pt_regs * regs) 779io_check_error(unsigned char reason, struct pt_regs *regs)
802{ 780{
803 printk("NMI: IOCK error (debug interrupt?)\n"); 781 printk("NMI: IOCK error (debug interrupt?)\n");
804 show_registers(regs); 782 show_registers(regs);
@@ -828,14 +806,14 @@ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
828 806
829/* Runs on IST stack. This code must keep interrupts off all the time. 807/* Runs on IST stack. This code must keep interrupts off all the time.
830 Nested NMIs are prevented by the CPU. */ 808 Nested NMIs are prevented by the CPU. */
831asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) 809asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
832{ 810{
833 unsigned char reason = 0; 811 unsigned char reason = 0;
834 int cpu; 812 int cpu;
835 813
836 cpu = smp_processor_id(); 814 cpu = smp_processor_id();
837 815
838 /* Only the BSP gets external NMIs from the system. */ 816 /* Only the BSP gets external NMIs from the system. */
839 if (!cpu) 817 if (!cpu)
840 reason = get_nmi_reason(); 818 reason = get_nmi_reason();
841 819
@@ -847,32 +825,57 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
847 * Ok, so this is none of the documented NMI sources, 825 * Ok, so this is none of the documented NMI sources,
848 * so it must be the NMI watchdog. 826 * so it must be the NMI watchdog.
849 */ 827 */
850 if (nmi_watchdog_tick(regs,reason)) 828 if (nmi_watchdog_tick(regs, reason))
851 return; 829 return;
852 if (!do_nmi_callback(regs,cpu)) 830 if (!do_nmi_callback(regs, cpu))
853 unknown_nmi_error(reason, regs); 831 unknown_nmi_error(reason, regs);
854 832
855 return; 833 return;
856 } 834 }
857 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 835 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
858 return; 836 return;
859 837
860 /* AK: following checks seem to be broken on modern chipsets. FIXME */ 838 /* AK: following checks seem to be broken on modern chipsets. FIXME */
861
862 if (reason & 0x80) 839 if (reason & 0x80)
863 mem_parity_error(reason, regs); 840 mem_parity_error(reason, regs);
864 if (reason & 0x40) 841 if (reason & 0x40)
865 io_check_error(reason, regs); 842 io_check_error(reason, regs);
866} 843}
867 844
845asmlinkage notrace __kprobes void
846do_nmi(struct pt_regs *regs, long error_code)
847{
848 nmi_enter();
849
850 add_pda(__nmi_count, 1);
851
852 if (!ignore_nmis)
853 default_do_nmi(regs);
854
855 nmi_exit();
856}
857
858void stop_nmi(void)
859{
860 acpi_nmi_disable();
861 ignore_nmis++;
862}
863
864void restart_nmi(void)
865{
866 ignore_nmis--;
867 acpi_nmi_enable();
868}
869
868/* runs on IST stack. */ 870/* runs on IST stack. */
869asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) 871asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
870{ 872{
871 trace_hardirqs_fixup(); 873 trace_hardirqs_fixup();
872 874
873 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { 875 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
876 == NOTIFY_STOP)
874 return; 877 return;
875 } 878
876 preempt_conditional_sti(regs); 879 preempt_conditional_sti(regs);
877 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 880 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
878 preempt_conditional_cli(regs); 881 preempt_conditional_cli(regs);
@@ -903,8 +906,8 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
903asmlinkage void __kprobes do_debug(struct pt_regs * regs, 906asmlinkage void __kprobes do_debug(struct pt_regs * regs,
904 unsigned long error_code) 907 unsigned long error_code)
905{ 908{
906 unsigned long condition;
907 struct task_struct *tsk = current; 909 struct task_struct *tsk = current;
910 unsigned long condition;
908 siginfo_t info; 911 siginfo_t info;
909 912
910 trace_hardirqs_fixup(); 913 trace_hardirqs_fixup();
@@ -925,21 +928,19 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
925 928
926 /* Mask out spurious debug traps due to lazy DR7 setting */ 929 /* Mask out spurious debug traps due to lazy DR7 setting */
927 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { 930 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
928 if (!tsk->thread.debugreg7) { 931 if (!tsk->thread.debugreg7)
929 goto clear_dr7; 932 goto clear_dr7;
930 }
931 } 933 }
932 934
933 tsk->thread.debugreg6 = condition; 935 tsk->thread.debugreg6 = condition;
934 936
935
936 /* 937 /*
937 * Single-stepping through TF: make sure we ignore any events in 938 * Single-stepping through TF: make sure we ignore any events in
938 * kernel space (but re-enable TF when returning to user mode). 939 * kernel space (but re-enable TF when returning to user mode).
939 */ 940 */
940 if (condition & DR_STEP) { 941 if (condition & DR_STEP) {
941 if (!user_mode(regs)) 942 if (!user_mode(regs))
942 goto clear_TF_reenable; 943 goto clear_TF_reenable;
943 } 944 }
944 945
945 /* Ok, finally something we can handle */ 946 /* Ok, finally something we can handle */
@@ -952,7 +953,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
952 force_sig_info(SIGTRAP, &info, tsk); 953 force_sig_info(SIGTRAP, &info, tsk);
953 954
954clear_dr7: 955clear_dr7:
955 set_debugreg(0UL, 7); 956 set_debugreg(0, 7);
956 preempt_conditional_cli(regs); 957 preempt_conditional_cli(regs);
957 return; 958 return;
958 959
@@ -960,6 +961,7 @@ clear_TF_reenable:
960 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 961 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
961 regs->flags &= ~X86_EFLAGS_TF; 962 regs->flags &= ~X86_EFLAGS_TF;
962 preempt_conditional_cli(regs); 963 preempt_conditional_cli(regs);
964 return;
963} 965}
964 966
965static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) 967static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
@@ -982,7 +984,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
982asmlinkage void do_coprocessor_error(struct pt_regs *regs) 984asmlinkage void do_coprocessor_error(struct pt_regs *regs)
983{ 985{
984 void __user *ip = (void __user *)(regs->ip); 986 void __user *ip = (void __user *)(regs->ip);
985 struct task_struct * task; 987 struct task_struct *task;
986 siginfo_t info; 988 siginfo_t info;
987 unsigned short cwd, swd; 989 unsigned short cwd, swd;
988 990
@@ -1015,30 +1017,30 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1015 cwd = get_fpu_cwd(task); 1017 cwd = get_fpu_cwd(task);
1016 swd = get_fpu_swd(task); 1018 swd = get_fpu_swd(task);
1017 switch (swd & ~cwd & 0x3f) { 1019 switch (swd & ~cwd & 0x3f) {
1018 case 0x000: 1020 case 0x000: /* No unmasked exception */
1019 default: 1021 default: /* Multiple exceptions */
1020 break; 1022 break;
1021 case 0x001: /* Invalid Op */ 1023 case 0x001: /* Invalid Op */
1022 /* 1024 /*
1023 * swd & 0x240 == 0x040: Stack Underflow 1025 * swd & 0x240 == 0x040: Stack Underflow
1024 * swd & 0x240 == 0x240: Stack Overflow 1026 * swd & 0x240 == 0x240: Stack Overflow
1025 * User must clear the SF bit (0x40) if set 1027 * User must clear the SF bit (0x40) if set
1026 */ 1028 */
1027 info.si_code = FPE_FLTINV; 1029 info.si_code = FPE_FLTINV;
1028 break; 1030 break;
1029 case 0x002: /* Denormalize */ 1031 case 0x002: /* Denormalize */
1030 case 0x010: /* Underflow */ 1032 case 0x010: /* Underflow */
1031 info.si_code = FPE_FLTUND; 1033 info.si_code = FPE_FLTUND;
1032 break; 1034 break;
1033 case 0x004: /* Zero Divide */ 1035 case 0x004: /* Zero Divide */
1034 info.si_code = FPE_FLTDIV; 1036 info.si_code = FPE_FLTDIV;
1035 break; 1037 break;
1036 case 0x008: /* Overflow */ 1038 case 0x008: /* Overflow */
1037 info.si_code = FPE_FLTOVF; 1039 info.si_code = FPE_FLTOVF;
1038 break; 1040 break;
1039 case 0x020: /* Precision */ 1041 case 0x020: /* Precision */
1040 info.si_code = FPE_FLTRES; 1042 info.si_code = FPE_FLTRES;
1041 break; 1043 break;
1042 } 1044 }
1043 force_sig_info(SIGFPE, &info, task); 1045 force_sig_info(SIGFPE, &info, task);
1044} 1046}
@@ -1051,7 +1053,7 @@ asmlinkage void bad_intr(void)
1051asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 1053asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1052{ 1054{
1053 void __user *ip = (void __user *)(regs->ip); 1055 void __user *ip = (void __user *)(regs->ip);
1054 struct task_struct * task; 1056 struct task_struct *task;
1055 siginfo_t info; 1057 siginfo_t info;
1056 unsigned short mxcsr; 1058 unsigned short mxcsr;
1057 1059
@@ -1079,25 +1081,25 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1079 */ 1081 */
1080 mxcsr = get_fpu_mxcsr(task); 1082 mxcsr = get_fpu_mxcsr(task);
1081 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { 1083 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
1082 case 0x000: 1084 case 0x000:
1083 default: 1085 default:
1084 break; 1086 break;
1085 case 0x001: /* Invalid Op */ 1087 case 0x001: /* Invalid Op */
1086 info.si_code = FPE_FLTINV; 1088 info.si_code = FPE_FLTINV;
1087 break; 1089 break;
1088 case 0x002: /* Denormalize */ 1090 case 0x002: /* Denormalize */
1089 case 0x010: /* Underflow */ 1091 case 0x010: /* Underflow */
1090 info.si_code = FPE_FLTUND; 1092 info.si_code = FPE_FLTUND;
1091 break; 1093 break;
1092 case 0x004: /* Zero Divide */ 1094 case 0x004: /* Zero Divide */
1093 info.si_code = FPE_FLTDIV; 1095 info.si_code = FPE_FLTDIV;
1094 break; 1096 break;
1095 case 0x008: /* Overflow */ 1097 case 0x008: /* Overflow */
1096 info.si_code = FPE_FLTOVF; 1098 info.si_code = FPE_FLTOVF;
1097 break; 1099 break;
1098 case 0x020: /* Precision */ 1100 case 0x020: /* Precision */
1099 info.si_code = FPE_FLTRES; 1101 info.si_code = FPE_FLTRES;
1100 break; 1102 break;
1101 } 1103 }
1102 force_sig_info(SIGFPE, &info, task); 1104 force_sig_info(SIGFPE, &info, task);
1103} 1105}
@@ -1115,7 +1117,7 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
1115} 1117}
1116 1118
1117/* 1119/*
1118 * 'math_state_restore()' saves the current math information in the 1120 * 'math_state_restore()' saves the current math information in the
1119 * old math state array, and gets the new ones from the current task 1121 * old math state array, and gets the new ones from the current task
1120 * 1122 *
1121 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 1123 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -1140,7 +1142,7 @@ asmlinkage void math_state_restore(void)
1140 local_irq_disable(); 1142 local_irq_disable();
1141 } 1143 }
1142 1144
1143 clts(); /* Allow maths ops (or we recurse) */ 1145 clts(); /* Allow maths ops (or we recurse) */
1144 restore_fpu_checking(&me->thread.xstate->fxsave); 1146 restore_fpu_checking(&me->thread.xstate->fxsave);
1145 task_thread_info(me)->status |= TS_USEDFPU; 1147 task_thread_info(me)->status |= TS_USEDFPU;
1146 me->fpu_counter++; 1148 me->fpu_counter++;
@@ -1149,64 +1151,61 @@ EXPORT_SYMBOL_GPL(math_state_restore);
1149 1151
1150void __init trap_init(void) 1152void __init trap_init(void)
1151{ 1153{
1152 set_intr_gate(0,&divide_error); 1154 set_intr_gate(0, &divide_error);
1153 set_intr_gate_ist(1,&debug,DEBUG_STACK); 1155 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1154 set_intr_gate_ist(2,&nmi,NMI_STACK); 1156 set_intr_gate_ist(2, &nmi, NMI_STACK);
1155 set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */ 1157 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
1156 set_system_gate(4,&overflow); /* int4 can be called from all */ 1158 set_system_gate(4, &overflow); /* int4 can be called from all */
1157 set_intr_gate(5,&bounds); 1159 set_intr_gate(5, &bounds);
1158 set_intr_gate(6,&invalid_op); 1160 set_intr_gate(6, &invalid_op);
1159 set_intr_gate(7,&device_not_available); 1161 set_intr_gate(7, &device_not_available);
1160 set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK); 1162 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
1161 set_intr_gate(9,&coprocessor_segment_overrun); 1163 set_intr_gate(9, &coprocessor_segment_overrun);
1162 set_intr_gate(10,&invalid_TSS); 1164 set_intr_gate(10, &invalid_TSS);
1163 set_intr_gate(11,&segment_not_present); 1165 set_intr_gate(11, &segment_not_present);
1164 set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK); 1166 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
1165 set_intr_gate(13,&general_protection); 1167 set_intr_gate(13, &general_protection);
1166 set_intr_gate(14,&page_fault); 1168 set_intr_gate(14, &page_fault);
1167 set_intr_gate(15,&spurious_interrupt_bug); 1169 set_intr_gate(15, &spurious_interrupt_bug);
1168 set_intr_gate(16,&coprocessor_error); 1170 set_intr_gate(16, &coprocessor_error);
1169 set_intr_gate(17,&alignment_check); 1171 set_intr_gate(17, &alignment_check);
1170#ifdef CONFIG_X86_MCE 1172#ifdef CONFIG_X86_MCE
1171 set_intr_gate_ist(18,&machine_check, MCE_STACK); 1173 set_intr_gate_ist(18, &machine_check, MCE_STACK);
1172#endif 1174#endif
1173 set_intr_gate(19,&simd_coprocessor_error); 1175 set_intr_gate(19, &simd_coprocessor_error);
1174 1176
1175#ifdef CONFIG_IA32_EMULATION 1177#ifdef CONFIG_IA32_EMULATION
1176 set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 1178 set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
1177#endif 1179#endif
1178
1179 /* 1180 /*
1180 * initialize the per thread extended state: 1181 * initialize the per thread extended state:
1181 */ 1182 */
1182 init_thread_xstate(); 1183 init_thread_xstate();
1183 /* 1184 /*
1184 * Should be a barrier for any external CPU state. 1185 * Should be a barrier for any external CPU state:
1185 */ 1186 */
1186 cpu_init(); 1187 cpu_init();
1187} 1188}
1188 1189
1189
1190static int __init oops_setup(char *s) 1190static int __init oops_setup(char *s)
1191{ 1191{
1192 if (!s) 1192 if (!s)
1193 return -EINVAL; 1193 return -EINVAL;
1194 if (!strcmp(s, "panic")) 1194 if (!strcmp(s, "panic"))
1195 panic_on_oops = 1; 1195 panic_on_oops = 1;
1196 return 0; 1196 return 0;
1197} 1197}
1198early_param("oops", oops_setup); 1198early_param("oops", oops_setup);
1199 1199
1200static int __init kstack_setup(char *s) 1200static int __init kstack_setup(char *s)
1201{ 1201{
1202 if (!s) 1202 if (!s)
1203 return -EINVAL; 1203 return -EINVAL;
1204 kstack_depth_to_print = simple_strtoul(s,NULL,0); 1204 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
1205 return 0; 1205 return 0;
1206} 1206}
1207early_param("kstack", kstack_setup); 1207early_param("kstack", kstack_setup);
1208 1208
1209
1210static int __init code_bytes_setup(char *s) 1209static int __init code_bytes_setup(char *s)
1211{ 1210{
1212 code_bytes = simple_strtoul(s, NULL, 0); 1211 code_bytes = simple_strtoul(s, NULL, 0);