author     Linus Torvalds <torvalds@linux-foundation.org>   2012-07-23 22:05:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-07-23 22:05:53 -0400
commit     47b170af84d18b736bb35932823ec01cfcfe1967 (patch)
tree       d52acd411c4c2e05b9db553e9b6f4a6bf59f5ca4 /arch/sh/kernel
parent     83c7f72259ea4bd0561e2f2762d97ee2888126ce (diff)
parent     9ff561fdf73493d757bbc74aa58627e1381650fb (diff)
Merge tag 'sh-for-linus' of git://github.com/pmundt/linux-sh
Pull SuperH updates from Paul Mundt:

 - Migration off of old-style dynamic IRQ API.
 - irqdomain and generic irq chip propagation.
 - div4/6 clock consolidation, another step towards co-existing with the
   common struct clk infrastructure.
 - Extensive PFC rework:
   - Decoupling GPIO from pin state.
   - Initial pinctrl support to facilitate incremental migration off of
     legacy pinmux.
   - gpiolib support made optional, and made pinctrl-backed.

* tag 'sh-for-linus' of git://github.com/pmundt/linux-sh: (38 commits)
  sh: pfc: pin config get/set support.
  sh: pfc: Prefer DRV_NAME over KBUILD_MODNAME.
  sh: pfc: pinctrl legacy group support.
  sh: pfc: Ignore pinmux GPIOs with invalid enum IDs.
  sh: pfc: Export pinctrl binding init symbol.
  sh: pfc: Error out on pinctrl init resolution failure.
  sh: pfc: Make pr_fmt consistent across pfc drivers.
  sh: pfc: pinctrl legacy function support.
  sh: pfc: Rudimentary pinctrl-backed GPIO support.
  sh: pfc: Dumb GPIO stringification.
  sh: pfc: Shuffle PFC support core.
  sh: pfc: Verify pin type encoding size at build time.
  sh: pfc: Kill off unused pinmux bias flags.
  sh: pfc: Make gpio chip support optional where possible.
  sh: pfc: Split out gpio chip support.
  sh64: Fix up section mismatch warnings.
  sh64: Attempt to make reserved insn trap handler resemble C.
  sh: Consolidate die definitions for trap handlers.
  sh64: Kill off old exception debugging helpers.
  sh64: Use generic unaligned access control/counters.
  ...
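The sh64 backtrace items above ("sh64: Kill off old exception debugging helpers", together with the unwind.c hunks below) move the SH-5 backtracer onto the common arch/sh unwinder registration scheme. The shape of that interface, condensed from the hunks that follow, is roughly the sketch below; it is for orientation only, the frame walk is elided, and the struct unwinder/struct stacktrace_ops definitions are assumed to come from <asm/unwinder.h> and <asm/stacktrace.h> as included there.

#include <linux/init.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/unwinder.h>	/* struct unwinder, unwinder_register() */
#include <asm/stacktrace.h>	/* struct stacktrace_ops */

/*
 * Dump callback: walks the frames for task/regs and reports each recovered
 * PC through ops->address(data, pc, reliable), as the reworked
 * sh64_unwind_inner() does in the hunk below.
 */
static void sh64_unwinder_dump(struct task_struct *task,
			       struct pt_regs *regs,
			       unsigned long *sp,
			       const struct stacktrace_ops *ops,
			       void *data)
{
	/* frame walk elided -- see sh64_unwind_inner() in the diff below */
}

static struct unwinder sh64_unwinder = {
	.name	= "sh64-unwinder",
	.dump	= sh64_unwinder_dump,
	.rating	= 150,	/* a higher rating is assumed to win unwinder selection */
};

static int __init sh64_unwinder_init(void)
{
	/* Hand the unwinder to the arch/sh unwinder core at early boot. */
	return unwinder_register(&sh64_unwinder);
}
early_initcall(sh64_unwinder_init);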
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--   arch/sh/kernel/cpu/sh5/unwind.c    63
-rw-r--r--   arch/sh/kernel/dumpstack.c         58
-rw-r--r--   arch/sh/kernel/irq.c               10
-rw-r--r--   arch/sh/kernel/traps.c             71
-rw-r--r--   arch/sh/kernel/traps_32.c         121
-rw-r--r--   arch/sh/kernel/traps_64.c         589
6 files changed, 394 insertions, 518 deletions
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
index b205b25eaf4..10aed41757f 100644
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -16,6 +16,8 @@
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17#include <asm/processor.h> 17#include <asm/processor.h>
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/unwinder.h>
20#include <asm/stacktrace.h>
19 21
20static u8 regcache[63]; 22static u8 regcache[63];
21 23
@@ -199,8 +201,11 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
199 return 0; 201 return 0;
200} 202}
201 203
202/* Don't put this on the stack since we'll want to call sh64_unwind 204/*
203 * when we're close to underflowing the stack anyway. */ 205 * Don't put this on the stack since we'll want to call in to
206 * sh64_unwinder_dump() when we're close to underflowing the stack
207 * anyway.
208 */
204static struct pt_regs here_regs; 209static struct pt_regs here_regs;
205 210
206extern const char syscall_ret; 211extern const char syscall_ret;
@@ -208,17 +213,19 @@ extern const char ret_from_syscall;
208extern const char ret_from_exception; 213extern const char ret_from_exception;
209extern const char ret_from_irq; 214extern const char ret_from_irq;
210 215
211static void sh64_unwind_inner(struct pt_regs *regs); 216static void sh64_unwind_inner(const struct stacktrace_ops *ops,
217 void *data, struct pt_regs *regs);
212 218
213static void unwind_nested (unsigned long pc, unsigned long fp) 219static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
220 unsigned long pc, unsigned long fp)
214{ 221{
215 if ((fp >= __MEMORY_START) && 222 if ((fp >= __MEMORY_START) &&
216 ((fp & 7) == 0)) { 223 ((fp & 7) == 0))
217 sh64_unwind_inner((struct pt_regs *) fp); 224 sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
218 }
219} 225}
220 226
221static void sh64_unwind_inner(struct pt_regs *regs) 227static void sh64_unwind_inner(const struct stacktrace_ops *ops,
228 void *data, struct pt_regs *regs)
222{ 229{
223 unsigned long pc, fp; 230 unsigned long pc, fp;
224 int ofs = 0; 231 int ofs = 0;
@@ -232,29 +239,29 @@ static void sh64_unwind_inner(struct pt_regs *regs)
232 int cond; 239 int cond;
233 unsigned long next_fp, next_pc; 240 unsigned long next_fp, next_pc;
234 241
235 if (pc == ((unsigned long) &syscall_ret & ~1)) { 242 if (pc == ((unsigned long)&syscall_ret & ~1)) {
236 printk("SYSCALL\n"); 243 printk("SYSCALL\n");
237 unwind_nested(pc,fp); 244 unwind_nested(ops, data, pc, fp);
238 return; 245 return;
239 } 246 }
240 247
241 if (pc == ((unsigned long) &ret_from_syscall & ~1)) { 248 if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
242 printk("SYSCALL (PREEMPTED)\n"); 249 printk("SYSCALL (PREEMPTED)\n");
243 unwind_nested(pc,fp); 250 unwind_nested(ops, data, pc, fp);
244 return; 251 return;
245 } 252 }
246 253
247 /* In this case, the PC is discovered by lookup_prev_stack_frame but 254 /* In this case, the PC is discovered by lookup_prev_stack_frame but
248 it has 4 taken off it to look like the 'caller' */ 255 it has 4 taken off it to look like the 'caller' */
249 if (pc == ((unsigned long) &ret_from_exception & ~1)) { 256 if (pc == ((unsigned long)&ret_from_exception & ~1)) {
250 printk("EXCEPTION\n"); 257 printk("EXCEPTION\n");
251 unwind_nested(pc,fp); 258 unwind_nested(ops, data, pc, fp);
252 return; 259 return;
253 } 260 }
254 261
255 if (pc == ((unsigned long) &ret_from_irq & ~1)) { 262 if (pc == ((unsigned long)&ret_from_irq & ~1)) {
256 printk("IRQ\n"); 263 printk("IRQ\n");
257 unwind_nested(pc,fp); 264 unwind_nested(ops, data, pc, fp);
258 return; 265 return;
259 } 266 }
260 267
@@ -263,8 +270,7 @@ static void sh64_unwind_inner(struct pt_regs *regs)
263 270
264 pc -= ofs; 271 pc -= ofs;
265 272
266 printk("[<%08lx>] ", pc); 273 ops->address(data, pc, 1);
267 print_symbol("%s\n", pc);
268 274
269 if (first_pass) { 275 if (first_pass) {
270 /* If the innermost frame is a leaf function, it's 276 /* If the innermost frame is a leaf function, it's
@@ -287,10 +293,13 @@ static void sh64_unwind_inner(struct pt_regs *regs)
287 } 293 }
288 294
289 printk("\n"); 295 printk("\n");
290
291} 296}
292 297
293void sh64_unwind(struct pt_regs *regs) 298static void sh64_unwinder_dump(struct task_struct *task,
299 struct pt_regs *regs,
300 unsigned long *sp,
301 const struct stacktrace_ops *ops,
302 void *data)
294{ 303{
295 if (!regs) { 304 if (!regs) {
296 /* 305 /*
@@ -320,7 +329,17 @@ void sh64_unwind(struct pt_regs *regs)
320 ); 329 );
321 } 330 }
322 331
323 printk("\nCall Trace:\n"); 332 sh64_unwind_inner(ops, data, regs);
324 sh64_unwind_inner(regs);
325} 333}
326 334
335static struct unwinder sh64_unwinder = {
336 .name = "sh64-unwinder",
337 .dump = sh64_unwinder_dump,
338 .rating = 150,
339};
340
341static int __init sh64_unwinder_init(void)
342{
343 return unwinder_register(&sh64_unwinder);
344}
345early_initcall(sh64_unwinder_init);
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 694158b9a50..7617dc4129a 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -2,13 +2,48 @@
2 * Copyright (C) 1991, 1992 Linus Torvalds 2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 * Copyright (C) 2009 Matt Fleming 4 * Copyright (C) 2009 Matt Fleming
5 * Copyright (C) 2002 - 2012 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
5 */ 10 */
6#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
7#include <linux/ftrace.h> 12#include <linux/ftrace.h>
8#include <linux/debug_locks.h> 13#include <linux/debug_locks.h>
14#include <linux/kdebug.h>
15#include <linux/export.h>
16#include <linux/uaccess.h>
9#include <asm/unwinder.h> 17#include <asm/unwinder.h>
10#include <asm/stacktrace.h> 18#include <asm/stacktrace.h>
11 19
20void dump_mem(const char *str, unsigned long bottom, unsigned long top)
21{
22 unsigned long p;
23 int i;
24
25 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
26
27 for (p = bottom & ~31; p < top; ) {
28 printk("%04lx: ", p & 0xffff);
29
30 for (i = 0; i < 8; i++, p += 4) {
31 unsigned int val;
32
33 if (p < bottom || p >= top)
34 printk(" ");
35 else {
36 if (__get_user(val, (unsigned int __user *)p)) {
37 printk("\n");
38 return;
39 }
40 printk("%08x ", val);
41 }
42 }
43 printk("\n");
44 }
45}
46
12void printk_address(unsigned long address, int reliable) 47void printk_address(unsigned long address, int reliable)
13{ 48{
14 printk(" [<%p>] %s%pS\n", (void *) address, 49 printk(" [<%p>] %s%pS\n", (void *) address,
@@ -106,3 +141,26 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
106 141
107 debug_show_held_locks(tsk); 142 debug_show_held_locks(tsk);
108} 143}
144
145void show_stack(struct task_struct *tsk, unsigned long *sp)
146{
147 unsigned long stack;
148
149 if (!tsk)
150 tsk = current;
151 if (tsk == current)
152 sp = (unsigned long *)current_stack_pointer;
153 else
154 sp = (unsigned long *)tsk->thread.sp;
155
156 stack = (unsigned long)sp;
157 dump_mem("Stack: ", stack, THREAD_SIZE +
158 (unsigned long)task_stack_page(tsk));
159 show_trace(tsk, sp, NULL);
160}
161
162void dump_stack(void)
163{
164 show_stack(NULL, NULL);
165}
166EXPORT_SYMBOL(dump_stack);
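For context on how the pieces connect: the unwinder registered in unwind.c reports frames through ops->address(data, pc, reliable), and printk_address() in this file is the natural printing callback. A minimal sketch of that consumer side follows; unwind_stack() and the exact struct stacktrace_ops layout are assumptions based on the <asm/unwinder.h> and <asm/stacktrace.h> headers included here, while printk_address() and the ops->address() convention are taken from the diff itself.

#include <asm/unwinder.h>	/* unwind_stack() -- assumed core entry point */
#include <asm/stacktrace.h>	/* struct stacktrace_ops -- assumed layout */

/* Print one resolved return address using printk_address() from above. */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.address = print_trace_address,
};

/* Illustrative show_trace()-style helper driving the best-rated unwinder. */
static void example_show_trace(struct task_struct *tsk, unsigned long *sp)
{
	unwind_stack(tsk, NULL, sp, &print_trace_ops, NULL);
}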
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index dadce735f74..063af10ff3c 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -231,16 +231,6 @@ void __init init_IRQ(void)
231 irq_ctx_init(smp_processor_id()); 231 irq_ctx_init(smp_processor_id());
232} 232}
233 233
234#ifdef CONFIG_SPARSE_IRQ
235int __init arch_probe_nr_irqs(void)
236{
237 /*
238 * No pre-allocated IRQs.
239 */
240 return 0;
241}
242#endif
243
244#ifdef CONFIG_HOTPLUG_CPU 234#ifdef CONFIG_HOTPLUG_CPU
245static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) 235static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
246{ 236{
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a87e58a9e38..72246bc0688 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -6,9 +6,80 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/hardirq.h> 8#include <linux/hardirq.h>
9#include <linux/kernel.h>
10#include <linux/kexec.h>
11#include <linux/module.h>
9#include <asm/unwinder.h> 12#include <asm/unwinder.h>
10#include <asm/traps.h> 13#include <asm/traps.h>
11 14
15static DEFINE_SPINLOCK(die_lock);
16
17void die(const char *str, struct pt_regs *regs, long err)
18{
19 static int die_counter;
20
21 oops_enter();
22
23 spin_lock_irq(&die_lock);
24 console_verbose();
25 bust_spinlocks(1);
26
27 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
28 print_modules();
29 show_regs(regs);
30
31 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
32 task_pid_nr(current), task_stack_page(current) + 1);
33
34 if (!user_mode(regs) || in_interrupt())
35 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
36 (unsigned long)task_stack_page(current));
37
38 notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
39
40 bust_spinlocks(0);
41 add_taint(TAINT_DIE);
42 spin_unlock_irq(&die_lock);
43 oops_exit();
44
45 if (kexec_should_crash(current))
46 crash_kexec(regs);
47
48 if (in_interrupt())
49 panic("Fatal exception in interrupt");
50
51 if (panic_on_oops)
52 panic("Fatal exception");
53
54 do_exit(SIGSEGV);
55}
56
57void die_if_kernel(const char *str, struct pt_regs *regs, long err)
58{
59 if (!user_mode(regs))
60 die(str, regs, err);
61}
62
63/*
64 * try and fix up kernelspace address errors
65 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
66 * - kernel/userspace interfaces cause a jump to an appropriate handler
67 * - other kernel errors are bad
68 */
69void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
70{
71 if (!user_mode(regs)) {
72 const struct exception_table_entry *fixup;
73 fixup = search_exception_tables(regs->pc);
74 if (fixup) {
75 regs->pc = fixup->fixup;
76 return;
77 }
78
79 die(str, regs, err);
80 }
81}
82
12#ifdef CONFIG_GENERIC_BUG 83#ifdef CONFIG_GENERIC_BUG
13static void handle_BUG(struct pt_regs *regs) 84static void handle_BUG(struct pt_regs *regs)
14{ 85{
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index a37175deb73..5f513a64ded 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -16,13 +16,11 @@
16#include <linux/hardirq.h> 16#include <linux/hardirq.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/module.h>
20#include <linux/kallsyms.h> 19#include <linux/kallsyms.h>
21#include <linux/io.h> 20#include <linux/io.h>
22#include <linux/bug.h> 21#include <linux/bug.h>
23#include <linux/debug_locks.h> 22#include <linux/debug_locks.h>
24#include <linux/kdebug.h> 23#include <linux/kdebug.h>
25#include <linux/kexec.h>
26#include <linux/limits.h> 24#include <linux/limits.h>
27#include <linux/sysfs.h> 25#include <linux/sysfs.h>
28#include <linux/uaccess.h> 26#include <linux/uaccess.h>
@@ -48,102 +46,6 @@
48#define TRAP_ILLEGAL_SLOT_INST 13 46#define TRAP_ILLEGAL_SLOT_INST 13
49#endif 47#endif
50 48
51static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
52{
53 unsigned long p;
54 int i;
55
56 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
57
58 for (p = bottom & ~31; p < top; ) {
59 printk("%04lx: ", p & 0xffff);
60
61 for (i = 0; i < 8; i++, p += 4) {
62 unsigned int val;
63
64 if (p < bottom || p >= top)
65 printk(" ");
66 else {
67 if (__get_user(val, (unsigned int __user *)p)) {
68 printk("\n");
69 return;
70 }
71 printk("%08x ", val);
72 }
73 }
74 printk("\n");
75 }
76}
77
78static DEFINE_SPINLOCK(die_lock);
79
80void die(const char * str, struct pt_regs * regs, long err)
81{
82 static int die_counter;
83
84 oops_enter();
85
86 spin_lock_irq(&die_lock);
87 console_verbose();
88 bust_spinlocks(1);
89
90 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
91 print_modules();
92 show_regs(regs);
93
94 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
95 task_pid_nr(current), task_stack_page(current) + 1);
96
97 if (!user_mode(regs) || in_interrupt())
98 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
99 (unsigned long)task_stack_page(current));
100
101 notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
102
103 bust_spinlocks(0);
104 add_taint(TAINT_DIE);
105 spin_unlock_irq(&die_lock);
106 oops_exit();
107
108 if (kexec_should_crash(current))
109 crash_kexec(regs);
110
111 if (in_interrupt())
112 panic("Fatal exception in interrupt");
113
114 if (panic_on_oops)
115 panic("Fatal exception");
116
117 do_exit(SIGSEGV);
118}
119
120static inline void die_if_kernel(const char *str, struct pt_regs *regs,
121 long err)
122{
123 if (!user_mode(regs))
124 die(str, regs, err);
125}
126
127/*
128 * try and fix up kernelspace address errors
129 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
130 * - kernel/userspace interfaces cause a jump to an appropriate handler
131 * - other kernel errors are bad
132 */
133static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
134{
135 if (!user_mode(regs)) {
136 const struct exception_table_entry *fixup;
137 fixup = search_exception_tables(regs->pc);
138 if (fixup) {
139 regs->pc = fixup->fixup;
140 return;
141 }
142
143 die(str, regs, err);
144 }
145}
146
147static inline void sign_extend(unsigned int count, unsigned char *dst) 49static inline void sign_extend(unsigned int count, unsigned char *dst)
148{ 50{
149#ifdef __LITTLE_ENDIAN__ 51#ifdef __LITTLE_ENDIAN__
@@ -900,26 +802,3 @@ void __init trap_init(void)
900 set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); 802 set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
901#endif 803#endif
902} 804}
903
904void show_stack(struct task_struct *tsk, unsigned long *sp)
905{
906 unsigned long stack;
907
908 if (!tsk)
909 tsk = current;
910 if (tsk == current)
911 sp = (unsigned long *)current_stack_pointer;
912 else
913 sp = (unsigned long *)tsk->thread.sp;
914
915 stack = (unsigned long)sp;
916 dump_mem("Stack: ", stack, THREAD_SIZE +
917 (unsigned long)task_stack_page(tsk));
918 show_trace(tsk, sp, NULL);
919}
920
921void dump_stack(void)
922{
923 show_stack(NULL, NULL);
924}
925EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 8dae93ed8af..f87d20da179 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -27,283 +27,25 @@
27#include <linux/perf_event.h> 27#include <linux/perf_event.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <linux/atomic.h> 30#include <asm/alignment.h>
31#include <asm/processor.h> 31#include <asm/processor.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34 34
35#undef DEBUG_EXCEPTION 35static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
36#ifdef DEBUG_EXCEPTION
37/* implemented in ../lib/dbg.c */
38extern void show_excp_regs(char *fname, int trapnr, int signr,
39 struct pt_regs *regs);
40#else
41#define show_excp_regs(a, b, c, d)
42#endif
43
44static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
45 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
46
47#define DO_ERROR(trapnr, signr, str, name, tsk) \
48asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
49{ \
50 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
51}
52
53static DEFINE_SPINLOCK(die_lock);
54
55void die(const char * str, struct pt_regs * regs, long err)
56{
57 console_verbose();
58 spin_lock_irq(&die_lock);
59 printk("%s: %lx\n", str, (err & 0xffffff));
60 show_regs(regs);
61 spin_unlock_irq(&die_lock);
62 do_exit(SIGSEGV);
63}
64
65static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
66{
67 if (!user_mode(regs))
68 die(str, regs, err);
69}
70
71static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
72{
73 if (!user_mode(regs)) {
74 const struct exception_table_entry *fixup;
75 fixup = search_exception_tables(regs->pc);
76 if (fixup) {
77 regs->pc = fixup->fixup;
78 return;
79 }
80 die(str, regs, err);
81 }
82}
83
84DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
85DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
86
87
88/* Implement misaligned load/store handling for kernel (and optionally for user
89 mode too). Limitation : only SHmedia mode code is handled - there is no
90 handling at all for misaligned accesses occurring in SHcompact code yet. */
91
92static int misaligned_fixup(struct pt_regs *regs);
93
94asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
95{
96 if (misaligned_fixup(regs) < 0) {
97 do_unhandled_exception(7, SIGSEGV, "address error(load)",
98 "do_address_error_load",
99 error_code, regs, current);
100 }
101 return;
102}
103
104asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
105{
106 if (misaligned_fixup(regs) < 0) {
107 do_unhandled_exception(8, SIGSEGV, "address error(store)",
108 "do_address_error_store",
109 error_code, regs, current);
110 }
111 return;
112}
113
114#if defined(CONFIG_SH64_ID2815_WORKAROUND)
115
116#define OPCODE_INVALID 0
117#define OPCODE_USER_VALID 1
118#define OPCODE_PRIV_VALID 2
119
120/* getcon/putcon - requires checking which control register is referenced. */
121#define OPCODE_CTRL_REG 3
122
123/* Table of valid opcodes for SHmedia mode.
124 Form a 10-bit value by concatenating the major/minor opcodes i.e.
125 opcode[31:26,20:16]. The 6 MSBs of this value index into the following
126 array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
127 LSBs==4'b0000 etc). */
128static unsigned long shmedia_opcode_table[64] = {
129 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
130 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
131 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
132 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
133 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
134 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
135 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
136 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
137};
138
139void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
140{
141 /* Workaround SH5-101 cut2 silicon defect #2815 :
142 in some situations, inter-mode branches from SHcompact -> SHmedia
143 which should take ITLBMISS or EXECPROT exceptions at the target
144 falsely take RESINST at the target instead. */
145
146 unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
147 unsigned long pc, aligned_pc;
148 int get_user_error;
149 int trapnr = 12;
150 int signr = SIGILL;
151 char *exception_name = "reserved_instruction";
152
153 pc = regs->pc;
154 if ((pc & 3) == 1) {
155 /* SHmedia : check for defect. This requires executable vmas
156 to be readable too. */
157 aligned_pc = pc & ~3;
158 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
159 get_user_error = -EFAULT;
160 } else {
161 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
162 }
163 if (get_user_error >= 0) {
164 unsigned long index, shift;
165 unsigned long major, minor, combined;
166 unsigned long reserved_field;
167 reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
168 major = (opcode >> 26) & 0x3f;
169 minor = (opcode >> 16) & 0xf;
170 combined = (major << 4) | minor;
171 index = major;
172 shift = minor << 1;
173 if (reserved_field == 0) {
174 int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
175 switch (opcode_state) {
176 case OPCODE_INVALID:
177 /* Trap. */
178 break;
179 case OPCODE_USER_VALID:
180 /* Restart the instruction : the branch to the instruction will now be from an RTE
181 not from SHcompact so the silicon defect won't be triggered. */
182 return;
183 case OPCODE_PRIV_VALID:
184 if (!user_mode(regs)) {
185 /* Should only ever get here if a module has
186 SHcompact code inside it. If so, the same fix up is needed. */
187 return; /* same reason */
188 }
189 /* Otherwise, user mode trying to execute a privileged instruction -
190 fall through to trap. */
191 break;
192 case OPCODE_CTRL_REG:
193 /* If in privileged mode, return as above. */
194 if (!user_mode(regs)) return;
195 /* In user mode ... */
196 if (combined == 0x9f) { /* GETCON */
197 unsigned long regno = (opcode >> 20) & 0x3f;
198 if (regno >= 62) {
199 return;
200 }
201 /* Otherwise, reserved or privileged control register, => trap */
202 } else if (combined == 0x1bf) { /* PUTCON */
203 unsigned long regno = (opcode >> 4) & 0x3f;
204 if (regno >= 62) {
205 return;
206 }
207 /* Otherwise, reserved or privileged control register, => trap */
208 } else {
209 /* Trap */
210 }
211 break;
212 default:
213 /* Fall through to trap. */
214 break;
215 }
216 }
217 /* fall through to normal resinst processing */
218 } else {
219 /* Error trying to read opcode. This typically means a
220 real fault, not a RESINST any more. So change the
221 codes. */
222 trapnr = 87;
223 exception_name = "address error (exec)";
224 signr = SIGSEGV;
225 }
226 }
227
228 do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
229}
230
231#else /* CONFIG_SH64_ID2815_WORKAROUND */
232
233/* If the workaround isn't needed, this is just a straightforward reserved
234 instruction */
235DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
236
237#endif /* CONFIG_SH64_ID2815_WORKAROUND */
238
239/* Called with interrupts disabled */
240asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
241{
242 show_excp_regs(__func__, -1, -1, regs);
243 die_if_kernel("exception", regs, ex);
244}
245
246int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
247{
248 /* Syscall debug */
249 printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
250
251 die_if_kernel("unknown trapa", regs, scId);
252
253 return -ENOSYS;
254}
255
256void show_stack(struct task_struct *tsk, unsigned long *sp)
257{
258#ifdef CONFIG_KALLSYMS
259 extern void sh64_unwind(struct pt_regs *regs);
260 struct pt_regs *regs;
261
262 regs = tsk ? tsk->thread.kregs : NULL;
263
264 sh64_unwind(regs);
265#else
266 printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
267#endif
268}
269
270void show_task(unsigned long *sp)
271{
272 show_stack(NULL, sp);
273}
274
275void dump_stack(void)
276{
277 show_task(NULL);
278}
279/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
280EXPORT_SYMBOL(dump_stack);
281
282static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
283 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
284{
285 show_excp_regs(fn_name, trapnr, signr, regs);
286
287 if (user_mode(regs))
288 force_sig(signr, tsk);
289
290 die_if_no_fixup(str, regs, error_code);
291}
292
293static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
294{ 36{
295 int get_user_error; 37 int get_user_error;
296 unsigned long aligned_pc; 38 unsigned long aligned_pc;
297 unsigned long opcode; 39 insn_size_t opcode;
298 40
299 if ((pc & 3) == 1) { 41 if ((pc & 3) == 1) {
300 /* SHmedia */ 42 /* SHmedia */
301 aligned_pc = pc & ~3; 43 aligned_pc = pc & ~3;
302 if (from_user_mode) { 44 if (from_user_mode) {
303 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { 45 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
304 get_user_error = -EFAULT; 46 get_user_error = -EFAULT;
305 } else { 47 } else {
306 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); 48 get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
307 *result_opcode = opcode; 49 *result_opcode = opcode;
308 } 50 }
309 return get_user_error; 51 return get_user_error;
@@ -311,7 +53,7 @@ static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int
311 /* If the fault was in the kernel, we can either read 53 /* If the fault was in the kernel, we can either read
312 * this directly, or if not, we fault. 54 * this directly, or if not, we fault.
313 */ 55 */
314 *result_opcode = *(unsigned long *) aligned_pc; 56 *result_opcode = *(insn_size_t *)aligned_pc;
315 return 0; 57 return 0;
316 } 58 }
317 } else if ((pc & 1) == 0) { 59 } else if ((pc & 1) == 0) {
@@ -337,17 +79,23 @@ static int address_is_sign_extended(__u64 a)
337#endif 79#endif
338} 80}
339 81
82/* return -1 for fault, 0 for OK */
340static int generate_and_check_address(struct pt_regs *regs, 83static int generate_and_check_address(struct pt_regs *regs,
341 __u32 opcode, 84 insn_size_t opcode,
342 int displacement_not_indexed, 85 int displacement_not_indexed,
343 int width_shift, 86 int width_shift,
344 __u64 *address) 87 __u64 *address)
345{ 88{
346 /* return -1 for fault, 0 for OK */
347
348 __u64 base_address, addr; 89 __u64 base_address, addr;
349 int basereg; 90 int basereg;
350 91
92 switch (1 << width_shift) {
93 case 1: inc_unaligned_byte_access(); break;
94 case 2: inc_unaligned_word_access(); break;
95 case 4: inc_unaligned_dword_access(); break;
96 case 8: inc_unaligned_multi_access(); break;
97 }
98
351 basereg = (opcode >> 20) & 0x3f; 99 basereg = (opcode >> 20) & 0x3f;
352 base_address = regs->regs[basereg]; 100 base_address = regs->regs[basereg];
353 if (displacement_not_indexed) { 101 if (displacement_not_indexed) {
@@ -364,28 +112,28 @@ static int generate_and_check_address(struct pt_regs *regs,
364 } 112 }
365 113
366 /* Check sign extended */ 114 /* Check sign extended */
367 if (!address_is_sign_extended(addr)) { 115 if (!address_is_sign_extended(addr))
368 return -1; 116 return -1;
369 }
370 117
371 /* Check accessible. For misaligned access in the kernel, assume the 118 /* Check accessible. For misaligned access in the kernel, assume the
372 address is always accessible (and if not, just fault when the 119 address is always accessible (and if not, just fault when the
373 load/store gets done.) */ 120 load/store gets done.) */
374 if (user_mode(regs)) { 121 if (user_mode(regs)) {
375 if (addr >= TASK_SIZE) { 122 inc_unaligned_user_access();
123
124 if (addr >= TASK_SIZE)
376 return -1; 125 return -1;
377 } 126 } else
378 /* Do access_ok check later - it depends on whether it's a load or a store. */ 127 inc_unaligned_kernel_access();
379 }
380 128
381 *address = addr; 129 *address = addr;
130
131 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
132 unaligned_fixups_notify(current, opcode, regs);
133
382 return 0; 134 return 0;
383} 135}
384 136
385static int user_mode_unaligned_fixup_count = 10;
386static int user_mode_unaligned_fixup_enable = 1;
387static int kernel_mode_unaligned_fixup_count = 32;
388
389static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) 137static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
390{ 138{
391 unsigned short x; 139 unsigned short x;
@@ -415,7 +163,7 @@ static void misaligned_kernel_word_store(__u64 address, __u64 value)
415} 163}
416 164
417static int misaligned_load(struct pt_regs *regs, 165static int misaligned_load(struct pt_regs *regs,
418 __u32 opcode, 166 insn_size_t opcode,
419 int displacement_not_indexed, 167 int displacement_not_indexed,
420 int width_shift, 168 int width_shift,
421 int do_sign_extend) 169 int do_sign_extend)
@@ -427,11 +175,8 @@ static int misaligned_load(struct pt_regs *regs,
427 175
428 error = generate_and_check_address(regs, opcode, 176 error = generate_and_check_address(regs, opcode,
429 displacement_not_indexed, width_shift, &address); 177 displacement_not_indexed, width_shift, &address);
430 if (error < 0) { 178 if (error < 0)
431 return error; 179 return error;
432 }
433
434 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
435 180
436 destreg = (opcode >> 4) & 0x3f; 181 destreg = (opcode >> 4) & 0x3f;
437 if (user_mode(regs)) { 182 if (user_mode(regs)) {
@@ -490,11 +235,10 @@ static int misaligned_load(struct pt_regs *regs,
490 } 235 }
491 236
492 return 0; 237 return 0;
493
494} 238}
495 239
496static int misaligned_store(struct pt_regs *regs, 240static int misaligned_store(struct pt_regs *regs,
497 __u32 opcode, 241 insn_size_t opcode,
498 int displacement_not_indexed, 242 int displacement_not_indexed,
499 int width_shift) 243 int width_shift)
500{ 244{
@@ -505,11 +249,8 @@ static int misaligned_store(struct pt_regs *regs,
505 249
506 error = generate_and_check_address(regs, opcode, 250 error = generate_and_check_address(regs, opcode,
507 displacement_not_indexed, width_shift, &address); 251 displacement_not_indexed, width_shift, &address);
508 if (error < 0) { 252 if (error < 0)
509 return error; 253 return error;
510 }
511
512 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
513 254
514 srcreg = (opcode >> 4) & 0x3f; 255 srcreg = (opcode >> 4) & 0x3f;
515 if (user_mode(regs)) { 256 if (user_mode(regs)) {
@@ -563,13 +304,12 @@ static int misaligned_store(struct pt_regs *regs,
563 } 304 }
564 305
565 return 0; 306 return 0;
566
567} 307}
568 308
569/* Never need to fix up misaligned FPU accesses within the kernel since that's a real 309/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
570 error. */ 310 error. */
571static int misaligned_fpu_load(struct pt_regs *regs, 311static int misaligned_fpu_load(struct pt_regs *regs,
572 __u32 opcode, 312 insn_size_t opcode,
573 int displacement_not_indexed, 313 int displacement_not_indexed,
574 int width_shift, 314 int width_shift,
575 int do_paired_load) 315 int do_paired_load)
@@ -581,11 +321,8 @@ static int misaligned_fpu_load(struct pt_regs *regs,
581 321
582 error = generate_and_check_address(regs, opcode, 322 error = generate_and_check_address(regs, opcode,
583 displacement_not_indexed, width_shift, &address); 323 displacement_not_indexed, width_shift, &address);
584 if (error < 0) { 324 if (error < 0)
585 return error; 325 return error;
586 }
587
588 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
589 326
590 destreg = (opcode >> 4) & 0x3f; 327 destreg = (opcode >> 4) & 0x3f;
591 if (user_mode(regs)) { 328 if (user_mode(regs)) {
@@ -641,12 +378,10 @@ static int misaligned_fpu_load(struct pt_regs *regs,
641 die ("Misaligned FPU load inside kernel", regs, 0); 378 die ("Misaligned FPU load inside kernel", regs, 0);
642 return -1; 379 return -1;
643 } 380 }
644
645
646} 381}
647 382
648static int misaligned_fpu_store(struct pt_regs *regs, 383static int misaligned_fpu_store(struct pt_regs *regs,
649 __u32 opcode, 384 insn_size_t opcode,
650 int displacement_not_indexed, 385 int displacement_not_indexed,
651 int width_shift, 386 int width_shift,
652 int do_paired_load) 387 int do_paired_load)
@@ -658,11 +393,8 @@ static int misaligned_fpu_store(struct pt_regs *regs,
658 393
659 error = generate_and_check_address(regs, opcode, 394 error = generate_and_check_address(regs, opcode,
660 displacement_not_indexed, width_shift, &address); 395 displacement_not_indexed, width_shift, &address);
661 if (error < 0) { 396 if (error < 0)
662 return error; 397 return error;
663 }
664
665 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
666 398
667 srcreg = (opcode >> 4) & 0x3f; 399 srcreg = (opcode >> 4) & 0x3f;
668 if (user_mode(regs)) { 400 if (user_mode(regs)) {
@@ -723,11 +455,13 @@ static int misaligned_fpu_store(struct pt_regs *regs,
723 455
724static int misaligned_fixup(struct pt_regs *regs) 456static int misaligned_fixup(struct pt_regs *regs)
725{ 457{
726 unsigned long opcode; 458 insn_size_t opcode;
727 int error; 459 int error;
728 int major, minor; 460 int major, minor;
461 unsigned int user_action;
729 462
730 if (!user_mode_unaligned_fixup_enable) 463 user_action = unaligned_user_action();
464 if (!(user_action & UM_FIXUP))
731 return -1; 465 return -1;
732 466
733 error = read_opcode(regs->pc, &opcode, user_mode(regs)); 467 error = read_opcode(regs->pc, &opcode, user_mode(regs));
@@ -737,23 +471,6 @@ static int misaligned_fixup(struct pt_regs *regs)
737 major = (opcode >> 26) & 0x3f; 471 major = (opcode >> 26) & 0x3f;
738 minor = (opcode >> 16) & 0xf; 472 minor = (opcode >> 16) & 0xf;
739 473
740 if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
741 --user_mode_unaligned_fixup_count;
742 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
743 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
744 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
745 } else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
746 --kernel_mode_unaligned_fixup_count;
747 if (in_interrupt()) {
748 printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
749 (__u32)regs->pc, opcode);
750 } else {
751 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
752 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
753 }
754 }
755
756
757 switch (major) { 474 switch (major) {
758 case (0x84>>2): /* LD.W */ 475 case (0x84>>2): /* LD.W */
759 error = misaligned_load(regs, opcode, 1, 1, 1); 476 error = misaligned_load(regs, opcode, 1, 1, 1);
@@ -878,59 +595,202 @@ static int misaligned_fixup(struct pt_regs *regs)
878 regs->pc += 4; /* Skip the instruction that's just been emulated */ 595 regs->pc += 4; /* Skip the instruction that's just been emulated */
879 return 0; 596 return 0;
880 } 597 }
598}
599
600static void do_unhandled_exception(int signr, char *str, unsigned long error,
601 struct pt_regs *regs)
602{
603 if (user_mode(regs))
604 force_sig(signr, current);
881 605
606 die_if_no_fixup(str, regs, error);
882} 607}
883 608
884static ctl_table unaligned_table[] = { 609#define DO_ERROR(signr, str, name) \
885 { 610asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
886 .procname = "kernel_reports", 611{ \
887 .data = &kernel_mode_unaligned_fixup_count, 612 do_unhandled_exception(signr, str, error_code, regs); \
888 .maxlen = sizeof(int), 613}
889 .mode = 0644,
890 .proc_handler = proc_dointvec
891 },
892 {
893 .procname = "user_reports",
894 .data = &user_mode_unaligned_fixup_count,
895 .maxlen = sizeof(int),
896 .mode = 0644,
897 .proc_handler = proc_dointvec
898 },
899 {
900 .procname = "user_enable",
901 .data = &user_mode_unaligned_fixup_enable,
902 .maxlen = sizeof(int),
903 .mode = 0644,
904 .proc_handler = proc_dointvec},
905 {}
906};
907 614
908static ctl_table unaligned_root[] = { 615DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
909 { 616DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
910 .procname = "unaligned_fixup", 617
911 .mode = 0555, 618#if defined(CONFIG_SH64_ID2815_WORKAROUND)
912 .child = unaligned_table 619
913 }, 620#define OPCODE_INVALID 0
914 {} 621#define OPCODE_USER_VALID 1
915}; 622#define OPCODE_PRIV_VALID 2
916 623
917static ctl_table sh64_root[] = { 624/* getcon/putcon - requires checking which control register is referenced. */
918 { 625#define OPCODE_CTRL_REG 3
919 .procname = "sh64", 626
920 .mode = 0555, 627/* Table of valid opcodes for SHmedia mode.
921 .child = unaligned_root 628 Form a 10-bit value by concatenating the major/minor opcodes i.e.
922 }, 629 opcode[31:26,20:16]. The 6 MSBs of this value index into the following
923 {} 630 array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
631 LSBs==4'b0000 etc). */
632static unsigned long shmedia_opcode_table[64] = {
633 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
634 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
635 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
636 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
637 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
638 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
639 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
640 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
924}; 641};
925static struct ctl_table_header *sysctl_header; 642
926static int __init init_sysctl(void) 643/* Workaround SH5-101 cut2 silicon defect #2815 :
644 in some situations, inter-mode branches from SHcompact -> SHmedia
645 which should take ITLBMISS or EXECPROT exceptions at the target
646 falsely take RESINST at the target instead. */
647void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
927{ 648{
928 sysctl_header = register_sysctl_table(sh64_root); 649 insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
929 return 0; 650 unsigned long pc, aligned_pc;
651 unsigned long index, shift;
652 unsigned long major, minor, combined;
653 unsigned long reserved_field;
654 int opcode_state;
655 int get_user_error;
656 int signr = SIGILL;
657 char *exception_name = "reserved_instruction";
658
659 pc = regs->pc;
660
661 /* SHcompact is not handled */
662 if (unlikely((pc & 3) == 0))
663 goto out;
664
665 /* SHmedia : check for defect. This requires executable vmas
666 to be readable too. */
667 aligned_pc = pc & ~3;
668 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
669 get_user_error = -EFAULT;
670 else
671 get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
672
673 if (get_user_error < 0) {
674 /*
675 * Error trying to read opcode. This typically means a
676 * real fault, not a RESINST any more. So change the
677 * codes.
678 */
679 exception_name = "address error (exec)";
680 signr = SIGSEGV;
681 goto out;
682 }
683
684 /* These bits are currently reserved as zero in all valid opcodes */
685 reserved_field = opcode & 0xf;
686 if (unlikely(reserved_field))
687 goto out; /* invalid opcode */
688
689 major = (opcode >> 26) & 0x3f;
690 minor = (opcode >> 16) & 0xf;
691 combined = (major << 4) | minor;
692 index = major;
693 shift = minor << 1;
694 opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
695 switch (opcode_state) {
696 case OPCODE_INVALID:
697 /* Trap. */
698 break;
699 case OPCODE_USER_VALID:
700 /*
701 * Restart the instruction: the branch to the instruction
702 * will now be from an RTE not from SHcompact so the
703 * silicon defect won't be triggered.
704 */
705 return;
706 case OPCODE_PRIV_VALID:
707 if (!user_mode(regs)) {
708 /*
709 * Should only ever get here if a module has
710 * SHcompact code inside it. If so, the same fix
711 * up is needed.
712 */
713 return; /* same reason */
714 }
715
716 /*
717 * Otherwise, user mode trying to execute a privileged
718 * instruction - fall through to trap.
719 */
720 break;
721 case OPCODE_CTRL_REG:
722 /* If in privileged mode, return as above. */
723 if (!user_mode(regs))
724 return;
725
726 /* In user mode ... */
727 if (combined == 0x9f) { /* GETCON */
728 unsigned long regno = (opcode >> 20) & 0x3f;
729
730 if (regno >= 62)
731 return;
732
733 /* reserved/privileged control register => trap */
734 } else if (combined == 0x1bf) { /* PUTCON */
735 unsigned long regno = (opcode >> 4) & 0x3f;
736
737 if (regno >= 62)
738 return;
739
740 /* reserved/privileged control register => trap */
741 }
742
743 break;
744 default:
745 /* Fall through to trap. */
746 break;
747 }
748
749out:
750 do_unhandled_exception(signr, exception_name, error_code, regs);
930} 751}
931 752
932__initcall(init_sysctl); 753#else /* CONFIG_SH64_ID2815_WORKAROUND */
933 754
755/* If the workaround isn't needed, this is just a straightforward reserved
756 instruction */
757DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
758
759#endif /* CONFIG_SH64_ID2815_WORKAROUND */
760
761/* Called with interrupts disabled */
762asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
763{
764 die_if_kernel("exception", regs, ex);
765}
766
767asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
768{
769 /* Syscall debug */
770 printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
771
772 die_if_kernel("unknown trapa", regs, scId);
773
774 return -ENOSYS;
775}
776
777/* Implement misaligned load/store handling for kernel (and optionally for user
778 mode too). Limitation : only SHmedia mode code is handled - there is no
779 handling at all for misaligned accesses occurring in SHcompact code yet. */
780
781asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
782{
783 if (misaligned_fixup(regs) < 0)
784 do_unhandled_exception(SIGSEGV, "address error(load)",
785 error_code, regs);
786}
787
788asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
789{
790 if (misaligned_fixup(regs) < 0)
791 do_unhandled_exception(SIGSEGV, "address error(store)",
792 error_code, regs);
793}
934 794
935asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) 795asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
936{ 796{
@@ -942,10 +802,9 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
942 of access we make to them - just go direct to their physical 802 of access we make to them - just go direct to their physical
943 addresses. */ 803 addresses. */
944 exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY); 804 exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
945 if (exp_cause & ~4) { 805 if (exp_cause & ~4)
946 printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n", 806 printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
947 (unsigned long)(exp_cause & 0xffffffff)); 807 (unsigned long)(exp_cause & 0xffffffff));
948 }
949 show_state(); 808 show_state();
950 /* Clear all DEBUGINT causes */ 809 /* Clear all DEBUGINT causes */
951 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); 810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
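The traps_64.c changes above drop the file-private sysctl knobs and per-mode report counters in favour of the generic SH unaligned-access control and counters from <asm/alignment.h> ("sh64: Use generic unaligned access control/counters" in the shortlog). A condensed sketch of how a misaligned-access handler now consults that interface, built only from helpers visible in the hunks above; the handler name and the single-width counter call are illustrative rather than the kernel's actual code, which splits this work between misaligned_fixup() and generate_and_check_address().

#include <linux/sched.h>	/* current */
#include <asm/ptrace.h>
#include <asm/alignment.h>	/* unaligned_user_action(), UM_FIXUP, counters */

/* Illustrative handler: returns 0 if the access was emulated, -1 to fault. */
static int example_misaligned_fixup(struct pt_regs *regs, insn_size_t opcode)
{
	/* Honour the system-wide policy: only emulate when fixup is enabled. */
	if (!(unaligned_user_action() & UM_FIXUP))
		return -1;

	inc_unaligned_word_access();			/* generic counters */
	unaligned_fixups_notify(current, opcode, regs);	/* rate-limited report */

	/* ... load/store emulation as in misaligned_load()/misaligned_store() ... */

	regs->pc += 4;	/* skip the SHmedia instruction just emulated */
	return 0;
}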