diff options
author | Alexander van Heukelum <heukelum@mailshack.com> | 2008-07-01 19:29:44 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-09 01:43:28 -0400 |
commit | a8c1be9d2e78d8608892c86791837acf12da4bf6 (patch) | |
tree | 274fc038d6bff0535f8535e9b63bb7d4e10f12ed /arch/x86/kernel/traps_64.c | |
parent | e93ef949fd9a3f237aedfb8e64414b28980530b8 (diff) |
x86: initial changes to unify traps_32.c and traps_64.c
This patch does not change the generated object files.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/traps_64.c')
-rw-r--r-- | arch/x86/kernel/traps_64.c | 309 |
1 files changed, 152 insertions, 157 deletions
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 80ba6d37bfe0..686074e6caf9 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -205,8 +205,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
205 | return NULL; | 205 | return NULL; |
206 | } | 206 | } |
207 | 207 | ||
208 | #define MSG(txt) ops->warning(data, txt) | ||
209 | |||
210 | /* | 208 | /* |
211 | * x86-64 can have up to three kernel stacks: | 209 | * x86-64 can have up to three kernel stacks: |
212 | * process stack | 210 | * process stack |
@@ -233,11 +231,11 @@ struct stack_frame { | |||
233 | unsigned long return_address; | 231 | unsigned long return_address; |
234 | }; | 232 | }; |
235 | 233 | ||
236 | 234 | static inline unsigned long | |
237 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 235 | print_context_stack(struct thread_info *tinfo, |
238 | unsigned long *stack, unsigned long bp, | 236 | unsigned long *stack, unsigned long bp, |
239 | const struct stacktrace_ops *ops, void *data, | 237 | const struct stacktrace_ops *ops, void *data, |
240 | unsigned long *end) | 238 | unsigned long *end) |
241 | { | 239 | { |
242 | struct stack_frame *frame = (struct stack_frame *)bp; | 240 | struct stack_frame *frame = (struct stack_frame *)bp; |
243 | 241 | ||
@@ -259,7 +257,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
259 | return bp; | 257 | return bp; |
260 | } | 258 | } |
261 | 259 | ||
262 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | 260 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
263 | unsigned long *stack, unsigned long bp, | 261 | unsigned long *stack, unsigned long bp, |
264 | const struct stacktrace_ops *ops, void *data) | 262 | const struct stacktrace_ops *ops, void *data) |
265 | { | 263 | { |
@@ -268,31 +266,29 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | |||
268 | unsigned used = 0; | 266 | unsigned used = 0; |
269 | struct thread_info *tinfo; | 267 | struct thread_info *tinfo; |
270 | 268 | ||
271 | if (!tsk) | 269 | if (!task) |
272 | tsk = current; | 270 | task = current; |
273 | tinfo = task_thread_info(tsk); | 271 | tinfo = task_thread_info(task); |
274 | 272 | ||
275 | if (!stack) { | 273 | if (!stack) { |
276 | unsigned long dummy; | 274 | unsigned long dummy; |
277 | stack = &dummy; | 275 | stack = &dummy; |
278 | if (tsk && tsk != current) | 276 | if (task && task != current) |
279 | stack = (unsigned long *)tsk->thread.sp; | 277 | stack = (unsigned long *)task->thread.sp; |
280 | } | 278 | } |
281 | 279 | ||
282 | #ifdef CONFIG_FRAME_POINTER | 280 | #ifdef CONFIG_FRAME_POINTER |
283 | if (!bp) { | 281 | if (!bp) { |
284 | if (tsk == current) { | 282 | if (task == current) { |
285 | /* Grab bp right from our regs */ | 283 | /* Grab bp right from our regs */ |
286 | asm("movq %%rbp, %0" : "=r" (bp):); | 284 | asm("movq %%rbp, %0" : "=r" (bp) :); |
287 | } else { | 285 | } else { |
288 | /* bp is the last reg pushed by switch_to */ | 286 | /* bp is the last reg pushed by switch_to */ |
289 | bp = *(unsigned long *) tsk->thread.sp; | 287 | bp = *(unsigned long *) task->thread.sp; |
290 | } | 288 | } |
291 | } | 289 | } |
292 | #endif | 290 | #endif |
293 | 291 | ||
294 | |||
295 | |||
296 | /* | 292 | /* |
297 | * Print function call entries in all stacks, starting at the | 293 | * Print function call entries in all stacks, starting at the |
298 | * current stack address. If the stacks consist of nested | 294 | * current stack address. If the stacks consist of nested |
@@ -382,18 +378,17 @@ static const struct stacktrace_ops print_trace_ops = { | |||
382 | .address = print_trace_address, | 378 | .address = print_trace_address, |
383 | }; | 379 | }; |
384 | 380 | ||
385 | void | 381 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
386 | show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, | 382 | unsigned long *stack, unsigned long bp) |
387 | unsigned long bp) | ||
388 | { | 383 | { |
389 | printk("\nCall Trace:\n"); | 384 | printk("\nCall Trace:\n"); |
390 | dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL); | 385 | dump_trace(task, regs, stack, bp, &print_trace_ops, NULL); |
391 | printk("\n"); | 386 | printk("\n"); |
392 | } | 387 | } |
393 | 388 | ||
394 | static void | 389 | static void |
395 | _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | 390 | _show_stack(struct task_struct *task, struct pt_regs *regs, |
396 | unsigned long bp) | 391 | unsigned long *sp, unsigned long bp) |
397 | { | 392 | { |
398 | unsigned long *stack; | 393 | unsigned long *stack; |
399 | int i; | 394 | int i; |
@@ -405,14 +400,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | |||
405 | // back trace for this cpu. | 400 | // back trace for this cpu. |
406 | 401 | ||
407 | if (sp == NULL) { | 402 | if (sp == NULL) { |
408 | if (tsk) | 403 | if (task) |
409 | sp = (unsigned long *)tsk->thread.sp; | 404 | sp = (unsigned long *)task->thread.sp; |
410 | else | 405 | else |
411 | sp = (unsigned long *)&sp; | 406 | sp = (unsigned long *)&sp; |
412 | } | 407 | } |
413 | 408 | ||
414 | stack = sp; | 409 | stack = sp; |
415 | for(i=0; i < kstack_depth_to_print; i++) { | 410 | for (i = 0; i < kstack_depth_to_print; i++) { |
416 | if (stack >= irqstack && stack <= irqstack_end) { | 411 | if (stack >= irqstack && stack <= irqstack_end) { |
417 | if (stack == irqstack_end) { | 412 | if (stack == irqstack_end) { |
418 | stack = (unsigned long *) (irqstack_end[-1]); | 413 | stack = (unsigned long *) (irqstack_end[-1]); |
@@ -427,12 +422,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | |||
427 | printk(" %016lx", *stack++); | 422 | printk(" %016lx", *stack++); |
428 | touch_nmi_watchdog(); | 423 | touch_nmi_watchdog(); |
429 | } | 424 | } |
430 | show_trace(tsk, regs, sp, bp); | 425 | show_trace(task, regs, sp, bp); |
431 | } | 426 | } |
432 | 427 | ||
433 | void show_stack(struct task_struct *tsk, unsigned long * sp) | 428 | void show_stack(struct task_struct *task, unsigned long *sp) |
434 | { | 429 | { |
435 | _show_stack(tsk, NULL, sp, 0); | 430 | _show_stack(task, NULL, sp, 0); |
436 | } | 431 | } |
437 | 432 | ||
438 | /* | 433 | /* |
@@ -440,7 +435,7 @@ void show_stack(struct task_struct *tsk, unsigned long * sp) | |||
440 | */ | 435 | */ |
441 | void dump_stack(void) | 436 | void dump_stack(void) |
442 | { | 437 | { |
443 | unsigned long dummy; | 438 | unsigned long stack; |
444 | unsigned long bp = 0; | 439 | unsigned long bp = 0; |
445 | 440 | ||
446 | #ifdef CONFIG_FRAME_POINTER | 441 | #ifdef CONFIG_FRAME_POINTER |
@@ -453,7 +448,7 @@ void dump_stack(void) | |||
453 | init_utsname()->release, | 448 | init_utsname()->release, |
454 | (int)strcspn(init_utsname()->version, " "), | 449 | (int)strcspn(init_utsname()->version, " "), |
455 | init_utsname()->version); | 450 | init_utsname()->version); |
456 | show_trace(NULL, NULL, &dummy, bp); | 451 | show_trace(NULL, NULL, &stack, bp); |
457 | } | 452 | } |
458 | 453 | ||
459 | EXPORT_SYMBOL(dump_stack); | 454 | EXPORT_SYMBOL(dump_stack); |
@@ -488,7 +483,7 @@ void show_registers(struct pt_regs *regs) | |||
488 | printk(KERN_EMERG "Code: "); | 483 | printk(KERN_EMERG "Code: "); |
489 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { | 484 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { |
490 | /* try starting at RIP */ | 485 | /* try starting at RIP */ |
491 | ip = (u8 *) regs->ip; | 486 | ip = (u8 *)regs->ip; |
492 | code_len = code_len - code_prologue + 1; | 487 | code_len = code_len - code_prologue + 1; |
493 | } | 488 | } |
494 | for (i = 0; i < code_len; i++, ip++) { | 489 | for (i = 0; i < code_len; i++, ip++) { |
@@ -504,7 +499,7 @@ void show_registers(struct pt_regs *regs) | |||
504 | } | 499 | } |
505 | } | 500 | } |
506 | printk("\n"); | 501 | printk("\n"); |
507 | } | 502 | } |
508 | 503 | ||
509 | int is_valid_bugaddr(unsigned long ip) | 504 | int is_valid_bugaddr(unsigned long ip) |
510 | { | 505 | { |
@@ -576,8 +571,10 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
576 | printk("DEBUG_PAGEALLOC"); | 571 | printk("DEBUG_PAGEALLOC"); |
577 | #endif | 572 | #endif |
578 | printk("\n"); | 573 | printk("\n"); |
579 | if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 574 | if (notify_die(DIE_OOPS, str, regs, err, |
575 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | ||
580 | return 1; | 576 | return 1; |
577 | |||
581 | show_registers(regs); | 578 | show_registers(regs); |
582 | add_taint(TAINT_DIE); | 579 | add_taint(TAINT_DIE); |
583 | /* Executive summary in case the oops scrolled away */ | 580 | /* Executive summary in case the oops scrolled away */ |
@@ -589,7 +586,7 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
589 | return 0; | 586 | return 0; |
590 | } | 587 | } |
591 | 588 | ||
592 | void die(const char * str, struct pt_regs * regs, long err) | 589 | void die(const char * str, struct pt_regs *regs, long err) |
593 | { | 590 | { |
594 | unsigned long flags = oops_begin(); | 591 | unsigned long flags = oops_begin(); |
595 | 592 | ||
@@ -606,8 +603,7 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic) | |||
606 | { | 603 | { |
607 | unsigned long flags; | 604 | unsigned long flags; |
608 | 605 | ||
609 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == | 606 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) |
610 | NOTIFY_STOP) | ||
611 | return; | 607 | return; |
612 | 608 | ||
613 | flags = oops_begin(); | 609 | flags = oops_begin(); |
@@ -629,9 +625,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic) | |||
629 | do_exit(SIGBUS); | 625 | do_exit(SIGBUS); |
630 | } | 626 | } |
631 | 627 | ||
632 | static void __kprobes do_trap(int trapnr, int signr, char *str, | 628 | static void __kprobes |
633 | struct pt_regs * regs, long error_code, | 629 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
634 | siginfo_t *info) | 630 | long error_code, siginfo_t *info) |
635 | { | 631 | { |
636 | struct task_struct *tsk = current; | 632 | struct task_struct *tsk = current; |
637 | 633 | ||
@@ -676,38 +672,38 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, | |||
676 | } | 672 | } |
677 | 673 | ||
678 | #define DO_ERROR(trapnr, signr, str, name) \ | 674 | #define DO_ERROR(trapnr, signr, str, name) \ |
679 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 675 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
680 | { \ | 676 | { \ |
681 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 677 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
682 | == NOTIFY_STOP) \ | 678 | == NOTIFY_STOP) \ |
683 | return; \ | 679 | return; \ |
684 | conditional_sti(regs); \ | 680 | conditional_sti(regs); \ |
685 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ | 681 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ |
686 | } | 682 | } |
687 | 683 | ||
688 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 684 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
689 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 685 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
690 | { \ | 686 | { \ |
691 | siginfo_t info; \ | 687 | siginfo_t info; \ |
692 | info.si_signo = signr; \ | 688 | info.si_signo = signr; \ |
693 | info.si_errno = 0; \ | 689 | info.si_errno = 0; \ |
694 | info.si_code = sicode; \ | 690 | info.si_code = sicode; \ |
695 | info.si_addr = (void __user *)siaddr; \ | 691 | info.si_addr = (void __user *)siaddr; \ |
696 | trace_hardirqs_fixup(); \ | 692 | trace_hardirqs_fixup(); \ |
697 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 693 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
698 | == NOTIFY_STOP) \ | 694 | == NOTIFY_STOP) \ |
699 | return; \ | 695 | return; \ |
700 | conditional_sti(regs); \ | 696 | conditional_sti(regs); \ |
701 | do_trap(trapnr, signr, str, regs, error_code, &info); \ | 697 | do_trap(trapnr, signr, str, regs, error_code, &info); \ |
702 | } | 698 | } |
703 | 699 | ||
704 | DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 700 | DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) |
705 | DO_ERROR( 4, SIGSEGV, "overflow", overflow) | 701 | DO_ERROR(4, SIGSEGV, "overflow", overflow) |
706 | DO_ERROR( 5, SIGSEGV, "bounds", bounds) | 702 | DO_ERROR(5, SIGSEGV, "bounds", bounds) |
707 | DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) | 703 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) |
708 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 704 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
709 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 705 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
710 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 706 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
711 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 707 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) |
712 | 708 | ||
713 | /* Runs on IST stack */ | 709 | /* Runs on IST stack */ |
@@ -775,14 +771,14 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs, | |||
775 | } | 771 | } |
776 | 772 | ||
777 | static notrace __kprobes void | 773 | static notrace __kprobes void |
778 | mem_parity_error(unsigned char reason, struct pt_regs * regs) | 774 | mem_parity_error(unsigned char reason, struct pt_regs *regs) |
779 | { | 775 | { |
780 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 776 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
781 | reason); | 777 | reason); |
782 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); | 778 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); |
783 | 779 | ||
784 | #if defined(CONFIG_EDAC) | 780 | #if defined(CONFIG_EDAC) |
785 | if(edac_handler_set()) { | 781 | if (edac_handler_set()) { |
786 | edac_atomic_assert_error(); | 782 | edac_atomic_assert_error(); |
787 | return; | 783 | return; |
788 | } | 784 | } |
@@ -799,7 +795,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs) | |||
799 | } | 795 | } |
800 | 796 | ||
801 | static notrace __kprobes void | 797 | static notrace __kprobes void |
802 | io_check_error(unsigned char reason, struct pt_regs * regs) | 798 | io_check_error(unsigned char reason, struct pt_regs *regs) |
803 | { | 799 | { |
804 | printk("NMI: IOCK error (debug interrupt?)\n"); | 800 | printk("NMI: IOCK error (debug interrupt?)\n"); |
805 | show_registers(regs); | 801 | show_registers(regs); |
@@ -836,7 +832,7 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
836 | 832 | ||
837 | cpu = smp_processor_id(); | 833 | cpu = smp_processor_id(); |
838 | 834 | ||
839 | /* Only the BSP gets external NMIs from the system. */ | 835 | /* Only the BSP gets external NMIs from the system. */ |
840 | if (!cpu) | 836 | if (!cpu) |
841 | reason = get_nmi_reason(); | 837 | reason = get_nmi_reason(); |
842 | 838 | ||
@@ -848,18 +844,17 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
848 | * Ok, so this is none of the documented NMI sources, | 844 | * Ok, so this is none of the documented NMI sources, |
849 | * so it must be the NMI watchdog. | 845 | * so it must be the NMI watchdog. |
850 | */ | 846 | */ |
851 | if (nmi_watchdog_tick(regs,reason)) | 847 | if (nmi_watchdog_tick(regs, reason)) |
852 | return; | 848 | return; |
853 | if (!do_nmi_callback(regs,cpu)) | 849 | if (!do_nmi_callback(regs, cpu)) |
854 | unknown_nmi_error(reason, regs); | 850 | unknown_nmi_error(reason, regs); |
855 | 851 | ||
856 | return; | 852 | return; |
857 | } | 853 | } |
858 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 854 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
859 | return; | 855 | return; |
860 | 856 | ||
861 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ | 857 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ |
862 | |||
863 | if (reason & 0x80) | 858 | if (reason & 0x80) |
864 | mem_parity_error(reason, regs); | 859 | mem_parity_error(reason, regs); |
865 | if (reason & 0x40) | 860 | if (reason & 0x40) |
@@ -870,9 +865,12 @@ asmlinkage notrace __kprobes void | |||
870 | do_nmi(struct pt_regs *regs, long error_code) | 865 | do_nmi(struct pt_regs *regs, long error_code) |
871 | { | 866 | { |
872 | nmi_enter(); | 867 | nmi_enter(); |
868 | |||
873 | add_pda(__nmi_count, 1); | 869 | add_pda(__nmi_count, 1); |
870 | |||
874 | if (!ignore_nmis) | 871 | if (!ignore_nmis) |
875 | default_do_nmi(regs); | 872 | default_do_nmi(regs); |
873 | |||
876 | nmi_exit(); | 874 | nmi_exit(); |
877 | } | 875 | } |
878 | 876 | ||
@@ -889,13 +887,14 @@ void restart_nmi(void) | |||
889 | } | 887 | } |
890 | 888 | ||
891 | /* runs on IST stack. */ | 889 | /* runs on IST stack. */ |
892 | asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) | 890 | asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
893 | { | 891 | { |
894 | trace_hardirqs_fixup(); | 892 | trace_hardirqs_fixup(); |
895 | 893 | ||
896 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { | 894 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) |
895 | == NOTIFY_STOP) | ||
897 | return; | 896 | return; |
898 | } | 897 | |
899 | preempt_conditional_sti(regs); | 898 | preempt_conditional_sti(regs); |
900 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); | 899 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); |
901 | preempt_conditional_cli(regs); | 900 | preempt_conditional_cli(regs); |
@@ -948,21 +947,19 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, | |||
948 | 947 | ||
949 | /* Mask out spurious debug traps due to lazy DR7 setting */ | 948 | /* Mask out spurious debug traps due to lazy DR7 setting */ |
950 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { | 949 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { |
951 | if (!tsk->thread.debugreg7) { | 950 | if (!tsk->thread.debugreg7) |
952 | goto clear_dr7; | 951 | goto clear_dr7; |
953 | } | ||
954 | } | 952 | } |
955 | 953 | ||
956 | tsk->thread.debugreg6 = condition; | 954 | tsk->thread.debugreg6 = condition; |
957 | 955 | ||
958 | |||
959 | /* | 956 | /* |
960 | * Single-stepping through TF: make sure we ignore any events in | 957 | * Single-stepping through TF: make sure we ignore any events in |
961 | * kernel space (but re-enable TF when returning to user mode). | 958 | * kernel space (but re-enable TF when returning to user mode). |
962 | */ | 959 | */ |
963 | if (condition & DR_STEP) { | 960 | if (condition & DR_STEP) { |
964 | if (!user_mode(regs)) | 961 | if (!user_mode(regs)) |
965 | goto clear_TF_reenable; | 962 | goto clear_TF_reenable; |
966 | } | 963 | } |
967 | 964 | ||
968 | /* Ok, finally something we can handle */ | 965 | /* Ok, finally something we can handle */ |
@@ -975,7 +972,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, | |||
975 | force_sig_info(SIGTRAP, &info, tsk); | 972 | force_sig_info(SIGTRAP, &info, tsk); |
976 | 973 | ||
977 | clear_dr7: | 974 | clear_dr7: |
978 | set_debugreg(0UL, 7); | 975 | set_debugreg(0, 7); |
979 | preempt_conditional_cli(regs); | 976 | preempt_conditional_cli(regs); |
980 | return; | 977 | return; |
981 | 978 | ||
@@ -983,6 +980,7 @@ clear_TF_reenable: | |||
983 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | 980 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); |
984 | regs->flags &= ~X86_EFLAGS_TF; | 981 | regs->flags &= ~X86_EFLAGS_TF; |
985 | preempt_conditional_cli(regs); | 982 | preempt_conditional_cli(regs); |
983 | return; | ||
986 | } | 984 | } |
987 | 985 | ||
988 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | 986 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) |
@@ -1005,7 +1003,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | |||
1005 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) | 1003 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) |
1006 | { | 1004 | { |
1007 | void __user *ip = (void __user *)(regs->ip); | 1005 | void __user *ip = (void __user *)(regs->ip); |
1008 | struct task_struct * task; | 1006 | struct task_struct *task; |
1009 | siginfo_t info; | 1007 | siginfo_t info; |
1010 | unsigned short cwd, swd; | 1008 | unsigned short cwd, swd; |
1011 | 1009 | ||
@@ -1038,30 +1036,30 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs) | |||
1038 | cwd = get_fpu_cwd(task); | 1036 | cwd = get_fpu_cwd(task); |
1039 | swd = get_fpu_swd(task); | 1037 | swd = get_fpu_swd(task); |
1040 | switch (swd & ~cwd & 0x3f) { | 1038 | switch (swd & ~cwd & 0x3f) { |
1041 | case 0x000: | 1039 | case 0x000: /* No unmasked exception */ |
1042 | default: | 1040 | default: /* Multiple exceptions */ |
1043 | break; | 1041 | break; |
1044 | case 0x001: /* Invalid Op */ | 1042 | case 0x001: /* Invalid Op */ |
1045 | /* | 1043 | /* |
1046 | * swd & 0x240 == 0x040: Stack Underflow | 1044 | * swd & 0x240 == 0x040: Stack Underflow |
1047 | * swd & 0x240 == 0x240: Stack Overflow | 1045 | * swd & 0x240 == 0x240: Stack Overflow |
1048 | * User must clear the SF bit (0x40) if set | 1046 | * User must clear the SF bit (0x40) if set |
1049 | */ | 1047 | */ |
1050 | info.si_code = FPE_FLTINV; | 1048 | info.si_code = FPE_FLTINV; |
1051 | break; | 1049 | break; |
1052 | case 0x002: /* Denormalize */ | 1050 | case 0x002: /* Denormalize */ |
1053 | case 0x010: /* Underflow */ | 1051 | case 0x010: /* Underflow */ |
1054 | info.si_code = FPE_FLTUND; | 1052 | info.si_code = FPE_FLTUND; |
1055 | break; | 1053 | break; |
1056 | case 0x004: /* Zero Divide */ | 1054 | case 0x004: /* Zero Divide */ |
1057 | info.si_code = FPE_FLTDIV; | 1055 | info.si_code = FPE_FLTDIV; |
1058 | break; | 1056 | break; |
1059 | case 0x008: /* Overflow */ | 1057 | case 0x008: /* Overflow */ |
1060 | info.si_code = FPE_FLTOVF; | 1058 | info.si_code = FPE_FLTOVF; |
1061 | break; | 1059 | break; |
1062 | case 0x020: /* Precision */ | 1060 | case 0x020: /* Precision */ |
1063 | info.si_code = FPE_FLTRES; | 1061 | info.si_code = FPE_FLTRES; |
1064 | break; | 1062 | break; |
1065 | } | 1063 | } |
1066 | force_sig_info(SIGFPE, &info, task); | 1064 | force_sig_info(SIGFPE, &info, task); |
1067 | } | 1065 | } |
@@ -1074,7 +1072,7 @@ asmlinkage void bad_intr(void) | |||
1074 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1072 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
1075 | { | 1073 | { |
1076 | void __user *ip = (void __user *)(regs->ip); | 1074 | void __user *ip = (void __user *)(regs->ip); |
1077 | struct task_struct * task; | 1075 | struct task_struct *task; |
1078 | siginfo_t info; | 1076 | siginfo_t info; |
1079 | unsigned short mxcsr; | 1077 | unsigned short mxcsr; |
1080 | 1078 | ||
@@ -1102,25 +1100,25 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1102 | */ | 1100 | */ |
1103 | mxcsr = get_fpu_mxcsr(task); | 1101 | mxcsr = get_fpu_mxcsr(task); |
1104 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { | 1102 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { |
1105 | case 0x000: | 1103 | case 0x000: |
1106 | default: | 1104 | default: |
1107 | break; | 1105 | break; |
1108 | case 0x001: /* Invalid Op */ | 1106 | case 0x001: /* Invalid Op */ |
1109 | info.si_code = FPE_FLTINV; | 1107 | info.si_code = FPE_FLTINV; |
1110 | break; | 1108 | break; |
1111 | case 0x002: /* Denormalize */ | 1109 | case 0x002: /* Denormalize */ |
1112 | case 0x010: /* Underflow */ | 1110 | case 0x010: /* Underflow */ |
1113 | info.si_code = FPE_FLTUND; | 1111 | info.si_code = FPE_FLTUND; |
1114 | break; | 1112 | break; |
1115 | case 0x004: /* Zero Divide */ | 1113 | case 0x004: /* Zero Divide */ |
1116 | info.si_code = FPE_FLTDIV; | 1114 | info.si_code = FPE_FLTDIV; |
1117 | break; | 1115 | break; |
1118 | case 0x008: /* Overflow */ | 1116 | case 0x008: /* Overflow */ |
1119 | info.si_code = FPE_FLTOVF; | 1117 | info.si_code = FPE_FLTOVF; |
1120 | break; | 1118 | break; |
1121 | case 0x020: /* Precision */ | 1119 | case 0x020: /* Precision */ |
1122 | info.si_code = FPE_FLTRES; | 1120 | info.si_code = FPE_FLTRES; |
1123 | break; | 1121 | break; |
1124 | } | 1122 | } |
1125 | force_sig_info(SIGFPE, &info, task); | 1123 | force_sig_info(SIGFPE, &info, task); |
1126 | } | 1124 | } |
@@ -1138,7 +1136,7 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void) | |||
1138 | } | 1136 | } |
1139 | 1137 | ||
1140 | /* | 1138 | /* |
1141 | * 'math_state_restore()' saves the current math information in the | 1139 | * 'math_state_restore()' saves the current math information in the |
1142 | * old math state array, and gets the new ones from the current task | 1140 | * old math state array, and gets the new ones from the current task |
1143 | * | 1141 | * |
1144 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 1142 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
@@ -1163,7 +1161,7 @@ asmlinkage void math_state_restore(void) | |||
1163 | local_irq_disable(); | 1161 | local_irq_disable(); |
1164 | } | 1162 | } |
1165 | 1163 | ||
1166 | clts(); /* Allow maths ops (or we recurse) */ | 1164 | clts(); /* Allow maths ops (or we recurse) */ |
1167 | restore_fpu_checking(&me->thread.xstate->fxsave); | 1165 | restore_fpu_checking(&me->thread.xstate->fxsave); |
1168 | task_thread_info(me)->status |= TS_USEDFPU; | 1166 | task_thread_info(me)->status |= TS_USEDFPU; |
1169 | me->fpu_counter++; | 1167 | me->fpu_counter++; |
@@ -1172,64 +1170,61 @@ EXPORT_SYMBOL_GPL(math_state_restore); | |||
1172 | 1170 | ||
1173 | void __init trap_init(void) | 1171 | void __init trap_init(void) |
1174 | { | 1172 | { |
1175 | set_intr_gate(0,&divide_error); | 1173 | set_intr_gate(0, &divide_error); |
1176 | set_intr_gate_ist(1,&debug,DEBUG_STACK); | 1174 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1177 | set_intr_gate_ist(2,&nmi,NMI_STACK); | 1175 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1178 | set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */ | 1176 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ |
1179 | set_system_gate(4,&overflow); /* int4 can be called from all */ | 1177 | set_system_gate(4, &overflow); /* int4 can be called from all */ |
1180 | set_intr_gate(5,&bounds); | 1178 | set_intr_gate(5, &bounds); |
1181 | set_intr_gate(6,&invalid_op); | 1179 | set_intr_gate(6, &invalid_op); |
1182 | set_intr_gate(7,&device_not_available); | 1180 | set_intr_gate(7, &device_not_available); |
1183 | set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK); | 1181 | set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); |
1184 | set_intr_gate(9,&coprocessor_segment_overrun); | 1182 | set_intr_gate(9, &coprocessor_segment_overrun); |
1185 | set_intr_gate(10,&invalid_TSS); | 1183 | set_intr_gate(10, &invalid_TSS); |
1186 | set_intr_gate(11,&segment_not_present); | 1184 | set_intr_gate(11, &segment_not_present); |
1187 | set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK); | 1185 | set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); |
1188 | set_intr_gate(13,&general_protection); | 1186 | set_intr_gate(13, &general_protection); |
1189 | set_intr_gate(14,&page_fault); | 1187 | set_intr_gate(14, &page_fault); |
1190 | set_intr_gate(15,&spurious_interrupt_bug); | 1188 | set_intr_gate(15, &spurious_interrupt_bug); |
1191 | set_intr_gate(16,&coprocessor_error); | 1189 | set_intr_gate(16, &coprocessor_error); |
1192 | set_intr_gate(17,&alignment_check); | 1190 | set_intr_gate(17, &alignment_check); |
1193 | #ifdef CONFIG_X86_MCE | 1191 | #ifdef CONFIG_X86_MCE |
1194 | set_intr_gate_ist(18,&machine_check, MCE_STACK); | 1192 | set_intr_gate_ist(18, &machine_check, MCE_STACK); |
1195 | #endif | 1193 | #endif |
1196 | set_intr_gate(19,&simd_coprocessor_error); | 1194 | set_intr_gate(19, &simd_coprocessor_error); |
1197 | 1195 | ||
1198 | #ifdef CONFIG_IA32_EMULATION | 1196 | #ifdef CONFIG_IA32_EMULATION |
1199 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1197 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
1200 | #endif | 1198 | #endif |
1201 | |||
1202 | /* | 1199 | /* |
1203 | * initialize the per thread extended state: | 1200 | * initialize the per thread extended state: |
1204 | */ | 1201 | */ |
1205 | init_thread_xstate(); | 1202 | init_thread_xstate(); |
1206 | /* | 1203 | /* |
1207 | * Should be a barrier for any external CPU state. | 1204 | * Should be a barrier for any external CPU state: |
1208 | */ | 1205 | */ |
1209 | cpu_init(); | 1206 | cpu_init(); |
1210 | } | 1207 | } |
1211 | 1208 | ||
1212 | |||
1213 | static int __init oops_setup(char *s) | 1209 | static int __init oops_setup(char *s) |
1214 | { | 1210 | { |
1215 | if (!s) | 1211 | if (!s) |
1216 | return -EINVAL; | 1212 | return -EINVAL; |
1217 | if (!strcmp(s, "panic")) | 1213 | if (!strcmp(s, "panic")) |
1218 | panic_on_oops = 1; | 1214 | panic_on_oops = 1; |
1219 | return 0; | 1215 | return 0; |
1220 | } | 1216 | } |
1221 | early_param("oops", oops_setup); | 1217 | early_param("oops", oops_setup); |
1222 | 1218 | ||
1223 | static int __init kstack_setup(char *s) | 1219 | static int __init kstack_setup(char *s) |
1224 | { | 1220 | { |
1225 | if (!s) | 1221 | if (!s) |
1226 | return -EINVAL; | 1222 | return -EINVAL; |
1227 | kstack_depth_to_print = simple_strtoul(s,NULL,0); | 1223 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); |
1228 | return 0; | 1224 | return 0; |
1229 | } | 1225 | } |
1230 | early_param("kstack", kstack_setup); | 1226 | early_param("kstack", kstack_setup); |
1231 | 1227 | ||
1232 | |||
1233 | static int __init code_bytes_setup(char *s) | 1228 | static int __init code_bytes_setup(char *s) |
1234 | { | 1229 | { |
1235 | code_bytes = simple_strtoul(s, NULL, 0); | 1230 | code_bytes = simple_strtoul(s, NULL, 0); |