author	Paul Mundt <lethal@linux-sh.org>	2008-09-20 23:00:23 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2008-09-20 23:00:23 -0400
commit	4c59e2942e92d2d776bcd038604a5c3c1d56d3ac (patch)
tree	56185c62c51852cf9c065b4d1b0313f69295bf27 /arch
parent	347cd34f4b32be30d2a6d92fe4d6eac04b00a637 (diff)
sh: Move lookup_exception_vector() out to asm/system_32.h.
There are other places where we want to have access to the trap/exception number, so move out the lookup_exception_vector() helper. While we're at it, refactor it slightly to return the vector instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
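In short, the helper changes from an out-parameter macro to one that returns the vector, so callers switch from passing an lvalue to assigning the result. A minimal host-buildable sketch of the two calling styles (stubbed with a made-up 0x1a0 vector value, not the SH inline-asm versions shown in the diff below):

	#include <stdio.h>

	/* Old style, as removed from traps_32.c: the macro writes the vector
	 * into an lvalue argument. (0x1a0UL is a stub value for the sketch.) */
	#define lookup_exception_vector_old(x)	((x) = 0x1a0UL)

	/* New style, as added to asm/system_32.h: a GCC statement expression,
	 * ({ ... }), whose last expression becomes the macro's value. */
	#define lookup_exception_vector_new()	\
	({					\
		unsigned long _vec = 0x1a0UL;	\
		_vec;				\
	})

	int main(void)
	{
		unsigned long a, b;

		lookup_exception_vector_old(a);		/* caller passes an lvalue */
		b = lookup_exception_vector_new();	/* caller assigns the result */

		printf("%#lx %#lx\n", a, b);
		return 0;
	}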
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/include/asm/system_32.h	25
-rw-r--r--	arch/sh/kernel/traps_32.c	16
2 files changed, 29 insertions, 12 deletions
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index f7f105627fd9..a726d5d07277 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -97,6 +97,31 @@ do { \
 		     : "=&r" (__dummy));		\
 } while (0)
 
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector()	\
+({					\
+	unsigned long _vec;		\
+					\
+	__asm__ __volatile__ (		\
+		"stc r2_bank, %0\n\t"	\
+		: "=r" (_vec)		\
+	);				\
+					\
+	_vec;				\
+})
+#else
+#define lookup_exception_vector()	\
+({					\
+	unsigned long _vec;		\
+	__asm__ __volatile__ (		\
+		"mov r4, %0\n\t"	\
+		: "=r" (_vec)		\
+	);				\
+					\
+	_vec;				\
+})
+#endif
+
 int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma);
 
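Since the motivation is that other places want the trap/exception number, a hypothetical new caller outside traps_32.c (illustrative only; the function name is invented, and the header is assumed to be reachable via asm/system.h) would now simply do:

	#include <asm/system.h>

	/* Hypothetical helper, not part of this commit: any other handler
	 * under arch/sh can now read the current exception vector directly. */
	static unsigned long current_trap_vector(void)
	{
		/* expands to the stc r2_bank (or mov r4) sequence added above */
		return lookup_exception_vector();
	}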
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 35b901ed6de3..b359b08a8e33 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -514,14 +514,6 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
 	return ret;
 }
 
-#ifdef CONFIG_CPU_HAS_SR_RB
-#define lookup_exception_vector(x)	\
-	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-#else
-#define lookup_exception_vector(x)	\
-	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
-#endif
-
 /*
  * Handle various address error exceptions:
  *  - instruction address error:
@@ -545,7 +537,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 	/* Intentional ifdef */
 #ifdef CONFIG_CPU_HAS_SR_RB
-	lookup_exception_vector(error_code);
+	error_code = lookup_exception_vector();
 #endif
 
 	oldfs = get_fs();
@@ -686,7 +678,7 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 	}
 #endif
 
-	lookup_exception_vector(error_code);
+	error_code = lookup_exception_vector();
 
 	local_irq_enable();
 	CHK_REMOTE_DEBUG(regs);
@@ -759,7 +751,7 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 	/* not a FPU inst. */
 #endif
 
-	lookup_exception_vector(inst);
+	inst = lookup_exception_vector();
 
 	local_irq_enable();
 	CHK_REMOTE_DEBUG(regs);
@@ -774,7 +766,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
 	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	long ex;
 
-	lookup_exception_vector(ex);
+	ex = lookup_exception_vector();
 	die_if_kernel("exception", regs, ex);
 }
 