 arch/sh/kernel/traps.c | 88 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 79 insertions(+), 9 deletions(-)
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55f3156..1edec2709efe 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -33,8 +33,13 @@
 #endif
 
 #ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST	4
-#define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_RESERVED_INST	4
+# define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_ADDRESS_ERROR	9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR	17
+#  define TRAP_DIVOVF_ERROR	18
+# endif
 #else
 #define TRAP_RESERVED_INST	12
 #define TRAP_ILLEGAL_SLOT_INST	13
@@ -479,6 +484,14 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 	return ret;
 }
 
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
+
482/* 495/*
483 * Handle various address error exceptions 496 * Handle various address error exceptions
484 */ 497 */
@@ -486,24 +499,37 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address)
 {
-	unsigned long error_code;
+	unsigned long error_code = 0;
 	mm_segment_t oldfs;
 	u16 instruction;
 	int tmp;
 
-	asm volatile("stc r2_bank,%0": "=r" (error_code));
+	/* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+	lookup_exception_vector(error_code);
+#endif
 
 	oldfs = get_fs();
 
 	if (user_mode(regs)) {
 		local_irq_enable();
 		current->thread.error_code = error_code;
+#ifdef CONFIG_CPU_SH2
+		/*
+		 * On the SH-2, we only have a single vector for address
+		 * errors, there's no differentiating between a load error
+		 * and a store error.
+		 */
+		current->thread.trap_no = 9;
+#else
 		current->thread.trap_no = (writeaccess) ? 8 : 7;
+#endif
 
 		/* bad PC is not something we can fix */
 		if (regs->pc & 1)
 			goto uspace_segv;
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(USER_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -518,6 +544,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 		if (tmp==0)
 			return; /* sorted */
+#endif
 
 	uspace_segv:
 		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
@@ -526,6 +553,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 	if (regs->pc & 1)
 		die("unaligned program counter", regs, error_code);
 
+#ifndef CONFIG_CPU_SH2A
 	set_fs(KERNEL_DS);
 	if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 		/* Argh. Fault on the instruction itself.
@@ -537,6 +565,10 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 	handle_unaligned_access(instruction, regs);
 	set_fs(oldfs);
+#else
+	printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
+	force_sig(SIGSEGV, current);
+#endif
 	}
 }
 
@@ -569,6 +601,29 @@ int is_dsp_inst(struct pt_regs *regs)
 #define is_dsp_inst(regs)	(0)
 #endif /* CONFIG_SH_DSP */
 
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs regs)
+{
+	siginfo_t info;
+
+	current->thread.trap_no = r4;
+	current->thread.error_code = 0;
+
+	switch (r4) {
+	case TRAP_DIVZERO_ERROR:
+		info.si_code = FPE_INTDIV;
+		break;
+	case TRAP_DIVOVF_ERROR:
+		info.si_code = FPE_INTOVF;
+		break;
+	}
+
+	force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
 /* arch/sh/kernel/cpu/sh4/fpu.c */
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
@@ -582,7 +637,7 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 	struct task_struct *tsk = current;
 
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 	int err;
 
 	get_user(inst, (unsigned short*)regs.pc);
@@ -604,7 +659,8 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 	}
 #endif
 
-	asm volatile("stc r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = TRAP_RESERVED_INST;
@@ -663,7 +719,7 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 
 	get_user(inst, (unsigned short *)regs.pc + 1);
 	if (!do_fpu_inst(inst, &regs)) {
@@ -675,7 +731,8 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 	/* not a FPU inst. */
 #endif
 
-	asm volatile("stc r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = TRAP_RESERVED_INST;
@@ -689,7 +746,8 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
 			      struct pt_regs regs)
 {
 	long ex;
-	asm volatile("stc r2_bank, %0" : "=r" (ex));
+
+	lookup_exception_vector(ex);
 	die_if_kernel("exception", &regs, ex);
 }
 
@@ -741,6 +799,10 @@ void *set_exception_table_vec(unsigned int vec, void *handler)
 	return old_handler;
 }
 
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+					     unsigned long r6, unsigned long r7,
+					     struct pt_regs regs);
+
 void __init trap_init(void)
 {
 	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,6 +821,14 @@ void __init trap_init(void)
 	set_exception_table_evt(0x800, do_fpu_state_restore);
 	set_exception_table_evt(0x820, do_fpu_state_restore);
 #endif
+
+#ifdef CONFIG_CPU_SH2
+	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
 
 	/* Setup VBR for boot cpu */
 	per_cpu_trap_init();
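
For illustration only, not part of the patch: with TRAP_DIVZERO_ERROR and TRAP_DIVOVF_ERROR routed to do_divide_error() as above, an SH-2A integer division by zero should reach userspace as SIGFPE with si_code FPE_INTDIV, and an overflowing division as FPE_INTOVF. A minimal userspace sketch (handler name and output format are arbitrary) that observes the signal:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void fpe_handler(int sig, siginfo_t *info, void *ctx)
{
	/* Expect FPE_INTDIV for divide-by-zero, FPE_INTOVF for overflow */
	printf("got SIGFPE, si_code=%d%s\n", info->si_code,
	       info->si_code == FPE_INTDIV ? " (FPE_INTDIV)" : "");
	_exit(0);	/* returning would re-execute the faulting divide */
}

int main(void)
{
	struct sigaction sa;
	volatile int zero = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGFPE, &sa, NULL);

	return 100 / zero;	/* expected to trap via the divide-by-zero vector */
}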