Diffstat (limited to 'arch/sh/kernel/traps.c')
-rw-r--r--	arch/sh/kernel/traps.c | 200
1 file changed, 147 insertions(+), 53 deletions(-)
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55f3156..3762d9dc2046 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -18,13 +18,14 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/io.h>
+#include <linux/debug_locks.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
 #ifdef CONFIG_SH_KGDB
 #include <asm/kgdb.h>
 #define CHK_REMOTE_DEBUG(regs)			\
 {						\
 	if (kgdb_debug_hook && !user_mode(regs))\
 		(*kgdb_debug_hook)(regs);	\
 }
@@ -33,8 +34,13 @@
 #endif
 
 #ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST	4
-#define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_RESERVED_INST	4
+# define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_ADDRESS_ERROR	9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR	17
+#  define TRAP_DIVOVF_ERROR	18
+# endif
 #else
 #define TRAP_RESERVED_INST	12
 #define TRAP_ILLEGAL_SLOT_INST	13
@@ -88,7 +94,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (!user_mode(regs) || in_interrupt())
 		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-			(unsigned long)task_stack_page(current));
+			 (unsigned long)task_stack_page(current));
 
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs,
 		die(str, regs, err);
 }
 
-static int handle_unaligned_notify_count = 10;
-
 /*
  * try and fix up kernelspace address errors
  * - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		if (copy_to_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
-		break;
+		break;
 
 	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
 		if (instruction & 4)
@@ -222,7 +226,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		if (copy_from_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
-		break;
+		break;
 
 	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
 		src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		*rm += count;
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
-		
+
 #ifdef __LITTLE_ENDIAN__
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
@@ -241,7 +245,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		}
 #else
 		dst += 4-count;
-		
+
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
 
@@ -320,7 +324,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
 			return -EFAULT;
 
 		/* kernel */
-		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+		die("delay-slot-insn faulting in handle_unaligned_delayslot",
+		    regs, 0);
 	}
 
 	return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
 #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
 #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
 static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 {
 	u_int rm;
@@ -354,7 +366,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 	if (user_mode(regs) && handle_unaligned_notify_count>0) {
 		handle_unaligned_notify_count--;
 
-		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+		printk(KERN_NOTICE "Fixing up unaligned userspace access "
+		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
 		       current->comm,current->pid,(u16*)regs->pc,instruction);
 	}
 
@@ -478,32 +491,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 	regs->pc += 2;
 	return ret;
 }
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
 
 /*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ *  - instruction address error:
+ *       misaligned PC
+ *       PC >= 0x80000000 in user mode
+ *  - data address error (read and write)
+ *       misaligned data access
+ *       access to >= 0x80000000 is user mode
+ * Unfortuntaly we can't distinguish between instruction address error
+ * and data address errors caused by read acceses.
  */
 asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address)
 {
-	unsigned long error_code;
+	unsigned long error_code = 0;
 	mm_segment_t oldfs;
+	siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
 	u16 instruction;
 	int tmp;
+#endif
 
-	asm volatile("stc r2_bank,%0": "=r" (error_code));
+	/* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+	lookup_exception_vector(error_code);
+#endif
 
 	oldfs = get_fs();
 
 	if (user_mode(regs)) {
+		int si_code = BUS_ADRERR;
+
 		local_irq_enable();
-		current->thread.error_code = error_code;
-		current->thread.trap_no = (writeaccess) ? 8 : 7;
 
 		/* bad PC is not something we can fix */
-		if (regs->pc & 1)
+		if (regs->pc & 1) {
+			si_code = BUS_ADRALN;
 			goto uspace_segv;
+		}
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(USER_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 		if (tmp==0)
 			return; /* sorted */
+#endif
 
 	uspace_segv:
-		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
-		force_sig(SIGSEGV, current);
+		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+		       regs->pr);
+
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGBUS, &info, current);
 	} else {
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(KERNEL_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 		handle_unaligned_access(instruction, regs);
 		set_fs(oldfs);
+#else
+		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+		       "access\n", current->comm);
+
+		force_sig(SIGSEGV, current);
+#endif
 	}
 }
 
@@ -548,7 +602,7 @@ int is_dsp_inst(struct pt_regs *regs)
 {
 	unsigned short inst;
 
-	/* 
+	/*
 	 * Safe guard if DSP mode is already enabled or we're lacking
 	 * the DSP altogether.
 	 */
@@ -569,27 +623,49 @@ int is_dsp_inst(struct pt_regs *regs)
 #define is_dsp_inst(regs)	(0)
 #endif /* CONFIG_SH_DSP */
 
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs)
+{
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	siginfo_t info;
+
+	switch (r4) {
+	case TRAP_DIVZERO_ERROR:
+		info.si_code = FPE_INTDIV;
+		break;
+	case TRAP_DIVOVF_ERROR:
+		info.si_code = FPE_INTOVF;
+		break;
+	}
+
+	force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
 /* arch/sh/kernel/cpu/sh4/fpu.c */
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
-		unsigned long r6, unsigned long r7, struct pt_regs regs);
+		unsigned long r6, unsigned long r7, struct pt_regs __regs);
 
 asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 	int err;
 
-	get_user(inst, (unsigned short*)regs.pc);
+	get_user(inst, (unsigned short*)regs->pc);
 
-	err = do_fpu_inst(inst, &regs);
+	err = do_fpu_inst(inst, regs);
 	if (!err) {
-		regs.pc += 2;
+		regs->pc += 2;
 		return;
 	}
 	/* not a FPU inst. */
@@ -597,20 +673,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 
 #ifdef CONFIG_SH_DSP
 	/* Check if it's a DSP instruction */
-	if (is_dsp_inst(&regs)) {
+	if (is_dsp_inst(regs)) {
 		/* Enable DSP mode, and restart instruction. */
-		regs.sr |= SR_DSP;
+		regs->sr |= SR_DSP;
 		return;
 	}
 #endif
 
-	asm volatile("stc r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("reserved instruction", &regs, error_code);
+	die_if_no_fixup("reserved instruction", regs, error_code);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
 
 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 
-	get_user(inst, (unsigned short *)regs.pc + 1);
-	if (!do_fpu_inst(inst, &regs)) {
-		get_user(inst, (unsigned short *)regs.pc);
-		if (!emulate_branch(inst, &regs))
+	get_user(inst, (unsigned short *)regs->pc + 1);
+	if (!do_fpu_inst(inst, regs)) {
+		get_user(inst, (unsigned short *)regs->pc);
+		if (!emulate_branch(inst, regs))
 			return;
 		/* fault in branch.*/
 	}
 	/* not a FPU inst. */
 #endif
 
-	asm volatile("stc r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("illegal slot instruction", &regs, error_code);
+	die_if_no_fixup("illegal slot instruction", regs, error_code);
 }
 
 asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	long ex;
-	asm volatile("stc r2_bank, %0" : "=r" (ex));
-	die_if_kernel("exception", &regs, ex);
+
+	lookup_exception_vector(ex);
+	die_if_kernel("exception", regs, ex);
 }
 
 #if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler)
 {
 	extern void *exception_handling_table[];
 	void *old_handler;
-	
+
 	old_handler = exception_handling_table[vec];
 	exception_handling_table[vec] = handler;
 	return old_handler;
 }
 
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+					     unsigned long r6, unsigned long r7,
+					     struct pt_regs __regs);
+
 void __init trap_init(void)
 {
 	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@ void __init trap_init(void)
 	set_exception_table_evt(0x800, do_fpu_state_restore);
 	set_exception_table_evt(0x820, do_fpu_state_restore);
 #endif
-	
+
+#ifdef CONFIG_CPU_SH2
+	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
 	/* Setup VBR for boot cpu */
 	per_cpu_trap_init();
 }
@@ -784,6 +873,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
 	}
 
 	printk("\n");
+
+	if (!tsk)
+		tsk = current;
+
+	debug_show_held_locks(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)