Diffstat (limited to 'arch/sh/kernel/traps.c')
-rw-r--r-- | arch/sh/kernel/traps.c | 949 |
1 file changed, 35 insertions, 914 deletions
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index cf99111cb33f..a3bdc68ef02c 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,947 +1,68 @@
1 | /* | ||
2 | * 'traps.c' handles hardware traps and faults after we have saved some | ||
3 | * state in 'entry.S'. | ||
4 | * | ||
5 | * SuperH version: Copyright (C) 1999 Niibe Yutaka | ||
6 | * Copyright (C) 2000 Philipp Rumpf | ||
7 | * Copyright (C) 2000 David Howells | ||
8 | * Copyright (C) 2002 - 2007 Paul Mundt | ||
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | */ | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/kallsyms.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/bug.h> | 1 | #include <linux/bug.h> |
22 | #include <linux/debug_locks.h> | 2 | #include <linux/io.h> |
3 | #include <linux/types.h> | ||
23 | #include <linux/kdebug.h> | 4 | #include <linux/kdebug.h> |
24 | #include <linux/kexec.h> | 5 | #include <linux/signal.h> |
25 | #include <linux/limits.h> | 6 | #include <linux/sched.h> |
26 | #include <asm/system.h> | 7 | #include <asm/system.h> |
27 | #include <asm/uaccess.h> | ||
28 | |||
29 | #ifdef CONFIG_SH_KGDB | ||
30 | #include <asm/kgdb.h> | ||
31 | #define CHK_REMOTE_DEBUG(regs) \ | ||
32 | { \ | ||
33 | if (kgdb_debug_hook && !user_mode(regs))\ | ||
34 | (*kgdb_debug_hook)(regs); \ | ||
35 | } | ||
36 | #else | ||
37 | #define CHK_REMOTE_DEBUG(regs) | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_CPU_SH2 | ||
41 | # define TRAP_RESERVED_INST 4 | ||
42 | # define TRAP_ILLEGAL_SLOT_INST 6 | ||
43 | # define TRAP_ADDRESS_ERROR 9 | ||
44 | # ifdef CONFIG_CPU_SH2A | ||
45 | # define TRAP_DIVZERO_ERROR 17 | ||
46 | # define TRAP_DIVOVF_ERROR 18 | ||
47 | # endif | ||
48 | #else | ||
49 | #define TRAP_RESERVED_INST 12 | ||
50 | #define TRAP_ILLEGAL_SLOT_INST 13 | ||
51 | #endif | ||
52 | |||
53 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) | ||
54 | { | ||
55 | unsigned long p; | ||
56 | int i; | ||
57 | |||
58 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); | ||
59 | |||
60 | for (p = bottom & ~31; p < top; ) { | ||
61 | printk("%04lx: ", p & 0xffff); | ||
62 | |||
63 | for (i = 0; i < 8; i++, p += 4) { | ||
64 | unsigned int val; | ||
65 | |||
66 | if (p < bottom || p >= top) | ||
67 | printk(" "); | ||
68 | else { | ||
69 | if (__get_user(val, (unsigned int __user *)p)) { | ||
70 | printk("\n"); | ||
71 | return; | ||
72 | } | ||
73 | printk("%08x ", val); | ||
74 | } | ||
75 | } | ||
76 | printk("\n"); | ||
77 | } | ||
78 | } | ||
79 | |||
80 | static DEFINE_SPINLOCK(die_lock); | ||
81 | |||
82 | void die(const char * str, struct pt_regs * regs, long err) | ||
83 | { | ||
84 | static int die_counter; | ||
85 | |||
86 | oops_enter(); | ||
87 | |||
88 | console_verbose(); | ||
89 | spin_lock_irq(&die_lock); | ||
90 | bust_spinlocks(1); | ||
91 | |||
92 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | ||
93 | |||
94 | CHK_REMOTE_DEBUG(regs); | ||
95 | print_modules(); | ||
96 | show_regs(regs); | ||
97 | |||
98 | printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm, | ||
99 | task_pid_nr(current), task_stack_page(current) + 1); | ||
100 | |||
101 | if (!user_mode(regs) || in_interrupt()) | ||
102 | dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + | ||
103 | (unsigned long)task_stack_page(current)); | ||
104 | |||
105 | bust_spinlocks(0); | ||
106 | add_taint(TAINT_DIE); | ||
107 | spin_unlock_irq(&die_lock); | ||
108 | |||
109 | if (kexec_should_crash(current)) | ||
110 | crash_kexec(regs); | ||
111 | |||
112 | if (in_interrupt()) | ||
113 | panic("Fatal exception in interrupt"); | ||
114 | |||
115 | if (panic_on_oops) | ||
116 | panic("Fatal exception"); | ||
117 | |||
118 | oops_exit(); | ||
119 | do_exit(SIGSEGV); | ||
120 | } | ||
121 | |||
122 | static inline void die_if_kernel(const char *str, struct pt_regs *regs, | ||
123 | long err) | ||
124 | { | ||
125 | if (!user_mode(regs)) | ||
126 | die(str, regs, err); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * try and fix up kernelspace address errors | ||
131 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV | ||
132 | * - kernel/userspace interfaces cause a jump to an appropriate handler | ||
133 | * - other kernel errors are bad | ||
134 | * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault | ||
135 | */ | ||
136 | static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | ||
137 | { | ||
138 | if (!user_mode(regs)) { | ||
139 | const struct exception_table_entry *fixup; | ||
140 | fixup = search_exception_tables(regs->pc); | ||
141 | if (fixup) { | ||
142 | regs->pc = fixup->fixup; | ||
143 | return 0; | ||
144 | } | ||
145 | die(str, regs, err); | ||
146 | } | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * handle an instruction that does an unaligned memory access by emulating the | ||
152 | * desired behaviour | ||
153 | * - note that PC _may not_ point to the faulting instruction | ||
154 | * (if that instruction is in a branch delay slot) | ||
155 | * - return 0 if emulation okay, -EFAULT on existential error | ||
156 | */ | ||
157 | static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | ||
158 | { | ||
159 | int ret, index, count; | ||
160 | unsigned long *rm, *rn; | ||
161 | unsigned char *src, *dst; | ||
162 | |||
163 | index = (instruction>>8)&15; /* 0x0F00 */ | ||
164 | rn = ®s->regs[index]; | ||
165 | |||
166 | index = (instruction>>4)&15; /* 0x00F0 */ | ||
167 | rm = ®s->regs[index]; | ||
168 | |||
169 | count = 1<<(instruction&3); | ||
170 | |||
171 | ret = -EFAULT; | ||
172 | switch (instruction>>12) { | ||
173 | case 0: /* mov.[bwl] to/from memory via r0+rn */ | ||
174 | if (instruction & 8) { | ||
175 | /* from memory */ | ||
176 | src = (unsigned char*) *rm; | ||
177 | src += regs->regs[0]; | ||
178 | dst = (unsigned char*) rn; | ||
179 | *(unsigned long*)dst = 0; | ||
180 | |||
181 | #ifdef __LITTLE_ENDIAN__ | ||
182 | if (copy_from_user(dst, src, count)) | ||
183 | goto fetch_fault; | ||
184 | |||
185 | if ((count == 2) && dst[1] & 0x80) { | ||
186 | dst[2] = 0xff; | ||
187 | dst[3] = 0xff; | ||
188 | } | ||
189 | #else | ||
190 | dst += 4-count; | ||
191 | |||
192 | if (__copy_user(dst, src, count)) | ||
193 | goto fetch_fault; | ||
194 | |||
195 | if ((count == 2) && dst[2] & 0x80) { | ||
196 | dst[0] = 0xff; | ||
197 | dst[1] = 0xff; | ||
198 | } | ||
199 | #endif | ||
200 | } else { | ||
201 | /* to memory */ | ||
202 | src = (unsigned char*) rm; | ||
203 | #if !defined(__LITTLE_ENDIAN__) | ||
204 | src += 4-count; | ||
205 | #endif | ||
206 | dst = (unsigned char*) *rn; | ||
207 | dst += regs->regs[0]; | ||
208 | |||
209 | if (copy_to_user(dst, src, count)) | ||
210 | goto fetch_fault; | ||
211 | } | ||
212 | ret = 0; | ||
213 | break; | ||
214 | |||
215 | case 1: /* mov.l Rm,@(disp,Rn) */ | ||
216 | src = (unsigned char*) rm; | ||
217 | dst = (unsigned char*) *rn; | ||
218 | dst += (instruction&0x000F)<<2; | ||
219 | |||
220 | if (copy_to_user(dst,src,4)) | ||
221 | goto fetch_fault; | ||
222 | ret = 0; | ||
223 | break; | ||
224 | |||
225 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ | ||
226 | if (instruction & 4) | ||
227 | *rn -= count; | ||
228 | src = (unsigned char*) rm; | ||
229 | dst = (unsigned char*) *rn; | ||
230 | #if !defined(__LITTLE_ENDIAN__) | ||
231 | src += 4-count; | ||
232 | #endif | ||
233 | if (copy_to_user(dst, src, count)) | ||
234 | goto fetch_fault; | ||
235 | ret = 0; | ||
236 | break; | ||
237 | |||
238 | case 5: /* mov.l @(disp,Rm),Rn */ | ||
239 | src = (unsigned char*) *rm; | ||
240 | src += (instruction&0x000F)<<2; | ||
241 | dst = (unsigned char*) rn; | ||
242 | *(unsigned long*)dst = 0; | ||
243 | |||
244 | if (copy_from_user(dst,src,4)) | ||
245 | goto fetch_fault; | ||
246 | ret = 0; | ||
247 | break; | ||
248 | 8 | ||
249 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ | 9 | #ifdef CONFIG_BUG |
250 | src = (unsigned char*) *rm; | 10 | static void handle_BUG(struct pt_regs *regs) |
251 | if (instruction & 4) | ||
252 | *rm += count; | ||
253 | dst = (unsigned char*) rn; | ||
254 | *(unsigned long*)dst = 0; | ||
255 | |||
256 | #ifdef __LITTLE_ENDIAN__ | ||
257 | if (copy_from_user(dst, src, count)) | ||
258 | goto fetch_fault; | ||
259 | |||
260 | if ((count == 2) && dst[1] & 0x80) { | ||
261 | dst[2] = 0xff; | ||
262 | dst[3] = 0xff; | ||
263 | } | ||
264 | #else | ||
265 | dst += 4-count; | ||
266 | |||
267 | if (copy_from_user(dst, src, count)) | ||
268 | goto fetch_fault; | ||
269 | |||
270 | if ((count == 2) && dst[2] & 0x80) { | ||
271 | dst[0] = 0xff; | ||
272 | dst[1] = 0xff; | ||
273 | } | ||
274 | #endif | ||
275 | ret = 0; | ||
276 | break; | ||
277 | |||
278 | case 8: | ||
279 | switch ((instruction&0xFF00)>>8) { | ||
280 | case 0x81: /* mov.w R0,@(disp,Rn) */ | ||
281 | src = (unsigned char*) ®s->regs[0]; | ||
282 | #if !defined(__LITTLE_ENDIAN__) | ||
283 | src += 2; | ||
284 | #endif | ||
285 | dst = (unsigned char*) *rm; /* called Rn in the spec */ | ||
286 | dst += (instruction&0x000F)<<1; | ||
287 | |||
288 | if (copy_to_user(dst, src, 2)) | ||
289 | goto fetch_fault; | ||
290 | ret = 0; | ||
291 | break; | ||
292 | |||
293 | case 0x85: /* mov.w @(disp,Rm),R0 */ | ||
294 | src = (unsigned char*) *rm; | ||
295 | src += (instruction&0x000F)<<1; | ||
296 | dst = (unsigned char*) ®s->regs[0]; | ||
297 | *(unsigned long*)dst = 0; | ||
298 | |||
299 | #if !defined(__LITTLE_ENDIAN__) | ||
300 | dst += 2; | ||
301 | #endif | ||
302 | |||
303 | if (copy_from_user(dst, src, 2)) | ||
304 | goto fetch_fault; | ||
305 | |||
306 | #ifdef __LITTLE_ENDIAN__ | ||
307 | if (dst[1] & 0x80) { | ||
308 | dst[2] = 0xff; | ||
309 | dst[3] = 0xff; | ||
310 | } | ||
311 | #else | ||
312 | if (dst[2] & 0x80) { | ||
313 | dst[0] = 0xff; | ||
314 | dst[1] = 0xff; | ||
315 | } | ||
316 | #endif | ||
317 | ret = 0; | ||
318 | break; | ||
319 | } | ||
320 | break; | ||
321 | } | ||
322 | return ret; | ||
323 | |||
324 | fetch_fault: | ||
325 | /* Argh. Address not only misaligned but also non-existent. | ||
326 | * Raise an EFAULT and see if it's trapped | ||
327 | */ | ||
328 | return die_if_no_fixup("Fault in unaligned fixup", regs, 0); | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * emulate the instruction in the delay slot | ||
333 | * - fetches the instruction from PC+2 | ||
334 | */ | ||
335 | static inline int handle_unaligned_delayslot(struct pt_regs *regs) | ||
336 | { | 11 | { |
337 | u16 instruction; | 12 | enum bug_trap_type tt; |
338 | 13 | tt = report_bug(regs->pc, regs); | |
339 | if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) { | 14 | if (tt == BUG_TRAP_TYPE_WARN) { |
340 | /* the instruction-fetch faulted */ | 15 | regs->pc += instruction_size(regs->pc); |
341 | if (user_mode(regs)) | 16 | return; |
342 | return -EFAULT; | ||
343 | |||
344 | /* kernel */ | ||
345 | die("delay-slot-insn faulting in handle_unaligned_delayslot", | ||
346 | regs, 0); | ||
347 | } | 17 | } |
348 | 18 | ||
349 | return handle_unaligned_ins(instruction,regs); | 19 | die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); |
350 | } | 20 | } |
351 | 21 | ||
352 | /* | 22 | int is_valid_bugaddr(unsigned long addr) |
353 | * handle an instruction that does an unaligned memory access | ||
354 | * - have to be careful of branch delay-slot instructions that fault | ||
355 | * SH3: | ||
356 | * - if the branch would be taken PC points to the branch | ||
357 | * - if the branch would not be taken, PC points to delay-slot | ||
358 | * SH4: | ||
359 | * - PC always points to delayed branch | ||
360 | * - return 0 if handled, -EFAULT if failed (may not return if in kernel) | ||
361 | */ | ||
362 | |||
363 | /* Macros to determine offset from current PC for branch instructions */ | ||
364 | /* Explicit type coercion is used to force sign extension where needed */ | ||
365 | #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) | ||
366 | #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) | ||
367 | |||
368 | /* | ||
369 | * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit | ||
370 | * opcodes.. | ||
371 | */ | ||
372 | #ifndef CONFIG_CPU_SH2A | ||
373 | static int handle_unaligned_notify_count = 10; | ||
374 | |||
375 | static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | ||
376 | { | 23 | { |
377 | u_int rm; | 24 | return addr >= PAGE_OFFSET; |
378 | int ret, index; | ||
379 | |||
380 | index = (instruction>>8)&15; /* 0x0F00 */ | ||
381 | rm = regs->regs[index]; | ||
382 | |||
383 | /* shout about the first ten userspace fixups */ | ||
384 | if (user_mode(regs) && handle_unaligned_notify_count>0) { | ||
385 | handle_unaligned_notify_count--; | ||
386 | |||
387 | printk(KERN_NOTICE "Fixing up unaligned userspace access " | ||
388 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
389 | current->comm, task_pid_nr(current), | ||
390 | (u16 *)regs->pc, instruction); | ||
391 | } | ||
392 | |||
393 | ret = -EFAULT; | ||
394 | switch (instruction&0xF000) { | ||
395 | case 0x0000: | ||
396 | if (instruction==0x000B) { | ||
397 | /* rts */ | ||
398 | ret = handle_unaligned_delayslot(regs); | ||
399 | if (ret==0) | ||
400 | regs->pc = regs->pr; | ||
401 | } | ||
402 | else if ((instruction&0x00FF)==0x0023) { | ||
403 | /* braf @Rm */ | ||
404 | ret = handle_unaligned_delayslot(regs); | ||
405 | if (ret==0) | ||
406 | regs->pc += rm + 4; | ||
407 | } | ||
408 | else if ((instruction&0x00FF)==0x0003) { | ||
409 | /* bsrf @Rm */ | ||
410 | ret = handle_unaligned_delayslot(regs); | ||
411 | if (ret==0) { | ||
412 | regs->pr = regs->pc + 4; | ||
413 | regs->pc += rm + 4; | ||
414 | } | ||
415 | } | ||
416 | else { | ||
417 | /* mov.[bwl] to/from memory via r0+rn */ | ||
418 | goto simple; | ||
419 | } | ||
420 | break; | ||
421 | |||
422 | case 0x1000: /* mov.l Rm,@(disp,Rn) */ | ||
423 | goto simple; | ||
424 | |||
425 | case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */ | ||
426 | goto simple; | ||
427 | |||
428 | case 0x4000: | ||
429 | if ((instruction&0x00FF)==0x002B) { | ||
430 | /* jmp @Rm */ | ||
431 | ret = handle_unaligned_delayslot(regs); | ||
432 | if (ret==0) | ||
433 | regs->pc = rm; | ||
434 | } | ||
435 | else if ((instruction&0x00FF)==0x000B) { | ||
436 | /* jsr @Rm */ | ||
437 | ret = handle_unaligned_delayslot(regs); | ||
438 | if (ret==0) { | ||
439 | regs->pr = regs->pc + 4; | ||
440 | regs->pc = rm; | ||
441 | } | ||
442 | } | ||
443 | else { | ||
444 | /* mov.[bwl] to/from memory via r0+rn */ | ||
445 | goto simple; | ||
446 | } | ||
447 | break; | ||
448 | |||
449 | case 0x5000: /* mov.l @(disp,Rm),Rn */ | ||
450 | goto simple; | ||
451 | |||
452 | case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */ | ||
453 | goto simple; | ||
454 | |||
455 | case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */ | ||
456 | switch (instruction&0x0F00) { | ||
457 | case 0x0100: /* mov.w R0,@(disp,Rm) */ | ||
458 | goto simple; | ||
459 | case 0x0500: /* mov.w @(disp,Rm),R0 */ | ||
460 | goto simple; | ||
461 | case 0x0B00: /* bf lab - no delayslot*/ | ||
462 | break; | ||
463 | case 0x0F00: /* bf/s lab */ | ||
464 | ret = handle_unaligned_delayslot(regs); | ||
465 | if (ret==0) { | ||
466 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) | ||
467 | if ((regs->sr & 0x00000001) != 0) | ||
468 | regs->pc += 4; /* next after slot */ | ||
469 | else | ||
470 | #endif | ||
471 | regs->pc += SH_PC_8BIT_OFFSET(instruction); | ||
472 | } | ||
473 | break; | ||
474 | case 0x0900: /* bt lab - no delayslot */ | ||
475 | break; | ||
476 | case 0x0D00: /* bt/s lab */ | ||
477 | ret = handle_unaligned_delayslot(regs); | ||
478 | if (ret==0) { | ||
479 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) | ||
480 | if ((regs->sr & 0x00000001) == 0) | ||
481 | regs->pc += 4; /* next after slot */ | ||
482 | else | ||
483 | #endif | ||
484 | regs->pc += SH_PC_8BIT_OFFSET(instruction); | ||
485 | } | ||
486 | break; | ||
487 | } | ||
488 | break; | ||
489 | |||
490 | case 0xA000: /* bra label */ | ||
491 | ret = handle_unaligned_delayslot(regs); | ||
492 | if (ret==0) | ||
493 | regs->pc += SH_PC_12BIT_OFFSET(instruction); | ||
494 | break; | ||
495 | |||
496 | case 0xB000: /* bsr label */ | ||
497 | ret = handle_unaligned_delayslot(regs); | ||
498 | if (ret==0) { | ||
499 | regs->pr = regs->pc + 4; | ||
500 | regs->pc += SH_PC_12BIT_OFFSET(instruction); | ||
501 | } | ||
502 | break; | ||
503 | } | ||
504 | return ret; | ||
505 | |||
506 | /* handle non-delay-slot instruction */ | ||
507 | simple: | ||
508 | ret = handle_unaligned_ins(instruction,regs); | ||
509 | if (ret==0) | ||
510 | regs->pc += instruction_size(instruction); | ||
511 | return ret; | ||
512 | } | 25 | } |
513 | #endif /* CONFIG_CPU_SH2A */ | ||
514 | |||
515 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
516 | #define lookup_exception_vector(x) \ | ||
517 | __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) | ||
518 | #else | ||
519 | #define lookup_exception_vector(x) \ | ||
520 | __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) | ||
521 | #endif | 26 | #endif |
522 | 27 | ||
523 | /* | 28 | /* |
524 | * Handle various address error exceptions: | 29 | * Generic trap handler. |
525 | * - instruction address error: | ||
526 | * misaligned PC | ||
527 | * PC >= 0x80000000 in user mode | ||
528 | * - data address error (read and write) | ||
529 | * misaligned data access | ||
530 | * access to >= 0x80000000 is user mode | ||
531 | * Unfortuntaly we can't distinguish between instruction address error | ||
532 | * and data address errors caused by read accesses. | ||
533 | */ | 30 | */ |
534 | asmlinkage void do_address_error(struct pt_regs *regs, | 31 | BUILD_TRAP_HANDLER(debug) |
535 | unsigned long writeaccess, | ||
536 | unsigned long address) | ||
537 | { | 32 | { |
538 | unsigned long error_code = 0; | 33 | TRAP_HANDLER_DECL; |
539 | mm_segment_t oldfs; | ||
540 | siginfo_t info; | ||
541 | #ifndef CONFIG_CPU_SH2A | ||
542 | u16 instruction; | ||
543 | int tmp; | ||
544 | #endif | ||
545 | |||
546 | /* Intentional ifdef */ | ||
547 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
548 | lookup_exception_vector(error_code); | ||
549 | #endif | ||
550 | |||
551 | oldfs = get_fs(); | ||
552 | |||
553 | if (user_mode(regs)) { | ||
554 | int si_code = BUS_ADRERR; | ||
555 | |||
556 | local_irq_enable(); | ||
557 | 34 | ||
558 | /* bad PC is not something we can fix */ | 35 | /* Rewind */ |
559 | if (regs->pc & 1) { | 36 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); |
560 | si_code = BUS_ADRALN; | ||
561 | goto uspace_segv; | ||
562 | } | ||
563 | 37 | ||
564 | #ifndef CONFIG_CPU_SH2A | 38 | if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff, |
565 | set_fs(USER_DS); | 39 | SIGTRAP) == NOTIFY_STOP) |
566 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | 40 | return; |
567 | /* Argh. Fault on the instruction itself. | ||
568 | This should never happen non-SMP | ||
569 | */ | ||
570 | set_fs(oldfs); | ||
571 | goto uspace_segv; | ||
572 | } | ||
573 | |||
574 | tmp = handle_unaligned_access(instruction, regs); | ||
575 | set_fs(oldfs); | ||
576 | |||
577 | if (tmp==0) | ||
578 | return; /* sorted */ | ||
579 | #endif | ||
580 | |||
581 | uspace_segv: | ||
582 | printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " | ||
583 | "access (PC %lx PR %lx)\n", current->comm, regs->pc, | ||
584 | regs->pr); | ||
585 | |||
586 | info.si_signo = SIGBUS; | ||
587 | info.si_errno = 0; | ||
588 | info.si_code = si_code; | ||
589 | info.si_addr = (void __user *)address; | ||
590 | force_sig_info(SIGBUS, &info, current); | ||
591 | } else { | ||
592 | if (regs->pc & 1) | ||
593 | die("unaligned program counter", regs, error_code); | ||
594 | |||
595 | #ifndef CONFIG_CPU_SH2A | ||
596 | set_fs(KERNEL_DS); | ||
597 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | ||
598 | /* Argh. Fault on the instruction itself. | ||
599 | This should never happen non-SMP | ||
600 | */ | ||
601 | set_fs(oldfs); | ||
602 | die("insn faulting in do_address_error", regs, 0); | ||
603 | } | ||
604 | |||
605 | handle_unaligned_access(instruction, regs); | ||
606 | set_fs(oldfs); | ||
607 | #else | ||
608 | printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " | ||
609 | "access\n", current->comm); | ||
610 | 41 | ||
611 | force_sig(SIGSEGV, current); | 42 | force_sig(SIGTRAP, current); |
612 | #endif | ||
613 | } | ||
614 | } | 43 | } |
615 | 44 | ||
616 | #ifdef CONFIG_SH_DSP | ||
617 | /* | 45 | /* |
618 | * SH-DSP support gerg@snapgear.com. | 46 | * Special handler for BUG() traps. |
619 | */ | 47 | */ |
620 | int is_dsp_inst(struct pt_regs *regs) | 48 | BUILD_TRAP_HANDLER(bug) |
621 | { | 49 | { |
622 | unsigned short inst = 0; | 50 | TRAP_HANDLER_DECL; |
623 | |||
624 | /* | ||
625 | * Safe guard if DSP mode is already enabled or we're lacking | ||
626 | * the DSP altogether. | ||
627 | */ | ||
628 | if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) | ||
629 | return 0; | ||
630 | |||
631 | get_user(inst, ((unsigned short *) regs->pc)); | ||
632 | |||
633 | inst &= 0xf000; | ||
634 | |||
635 | /* Check for any type of DSP or support instruction */ | ||
636 | if ((inst == 0xf000) || (inst == 0x4000)) | ||
637 | return 1; | ||
638 | |||
639 | return 0; | ||
640 | } | ||
641 | #else | ||
642 | #define is_dsp_inst(regs) (0) | ||
643 | #endif /* CONFIG_SH_DSP */ | ||
644 | 51 | ||
645 | #ifdef CONFIG_CPU_SH2A | 52 | /* Rewind */ |
646 | asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | 53 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); |
647 | unsigned long r6, unsigned long r7, | ||
648 | struct pt_regs __regs) | ||
649 | { | ||
650 | siginfo_t info; | ||
651 | |||
652 | switch (r4) { | ||
653 | case TRAP_DIVZERO_ERROR: | ||
654 | info.si_code = FPE_INTDIV; | ||
655 | break; | ||
656 | case TRAP_DIVOVF_ERROR: | ||
657 | info.si_code = FPE_INTOVF; | ||
658 | break; | ||
659 | } | ||
660 | |||
661 | force_sig_info(SIGFPE, &info, current); | ||
662 | } | ||
663 | #endif | ||
664 | |||
665 | /* arch/sh/kernel/cpu/sh4/fpu.c */ | ||
666 | extern int do_fpu_inst(unsigned short, struct pt_regs *); | ||
667 | extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, | ||
668 | unsigned long r6, unsigned long r7, struct pt_regs __regs); | ||
669 | |||
670 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | ||
671 | unsigned long r6, unsigned long r7, | ||
672 | struct pt_regs __regs) | ||
673 | { | ||
674 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
675 | unsigned long error_code; | ||
676 | struct task_struct *tsk = current; | ||
677 | |||
678 | #ifdef CONFIG_SH_FPU_EMU | ||
679 | unsigned short inst = 0; | ||
680 | int err; | ||
681 | |||
682 | get_user(inst, (unsigned short*)regs->pc); | ||
683 | |||
684 | err = do_fpu_inst(inst, regs); | ||
685 | if (!err) { | ||
686 | regs->pc += instruction_size(inst); | ||
687 | return; | ||
688 | } | ||
689 | /* not a FPU inst. */ | ||
690 | #endif | ||
691 | 54 | ||
692 | #ifdef CONFIG_SH_DSP | 55 | if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, |
693 | /* Check if it's a DSP instruction */ | 56 | SIGTRAP) == NOTIFY_STOP) |
694 | if (is_dsp_inst(regs)) { | ||
695 | /* Enable DSP mode, and restart instruction. */ | ||
696 | regs->sr |= SR_DSP; | ||
697 | return; | 57 | return; |
698 | } | ||
699 | #endif | ||
700 | |||
701 | lookup_exception_vector(error_code); | ||
702 | |||
703 | local_irq_enable(); | ||
704 | CHK_REMOTE_DEBUG(regs); | ||
705 | force_sig(SIGILL, tsk); | ||
706 | die_if_no_fixup("reserved instruction", regs, error_code); | ||
707 | } | ||
708 | |||
709 | #ifdef CONFIG_SH_FPU_EMU | ||
710 | static int emulate_branch(unsigned short inst, struct pt_regs* regs) | ||
711 | { | ||
712 | /* | ||
713 | * bfs: 8fxx: PC+=d*2+4; | ||
714 | * bts: 8dxx: PC+=d*2+4; | ||
715 | * bra: axxx: PC+=D*2+4; | ||
716 | * bsr: bxxx: PC+=D*2+4 after PR=PC+4; | ||
717 | * braf:0x23: PC+=Rn*2+4; | ||
718 | * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4; | ||
719 | * jmp: 4x2b: PC=Rn; | ||
720 | * jsr: 4x0b: PC=Rn after PR=PC+4; | ||
721 | * rts: 000b: PC=PR; | ||
722 | */ | ||
723 | if ((inst & 0xfd00) == 0x8d00) { | ||
724 | regs->pc += SH_PC_8BIT_OFFSET(inst); | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | if ((inst & 0xe000) == 0xa000) { | ||
729 | regs->pc += SH_PC_12BIT_OFFSET(inst); | ||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | if ((inst & 0xf0df) == 0x0003) { | ||
734 | regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; | ||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | if ((inst & 0xf0df) == 0x400b) { | ||
739 | regs->pc = regs->regs[(inst & 0x0f00) >> 8]; | ||
740 | return 0; | ||
741 | } | ||
742 | |||
743 | if ((inst & 0xffff) == 0x000b) { | ||
744 | regs->pc = regs->pr; | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | return 1; | ||
749 | } | ||
750 | #endif | ||
751 | |||
752 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | ||
753 | unsigned long r6, unsigned long r7, | ||
754 | struct pt_regs __regs) | ||
755 | { | ||
756 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
757 | unsigned long error_code; | ||
758 | struct task_struct *tsk = current; | ||
759 | #ifdef CONFIG_SH_FPU_EMU | ||
760 | unsigned short inst = 0; | ||
761 | |||
762 | get_user(inst, (unsigned short *)regs->pc + 1); | ||
763 | if (!do_fpu_inst(inst, regs)) { | ||
764 | get_user(inst, (unsigned short *)regs->pc); | ||
765 | if (!emulate_branch(inst, regs)) | ||
766 | return; | ||
767 | /* fault in branch.*/ | ||
768 | } | ||
769 | /* not a FPU inst. */ | ||
770 | #endif | ||
771 | |||
772 | lookup_exception_vector(error_code); | ||
773 | |||
774 | local_irq_enable(); | ||
775 | CHK_REMOTE_DEBUG(regs); | ||
776 | force_sig(SIGILL, tsk); | ||
777 | die_if_no_fixup("illegal slot instruction", regs, error_code); | ||
778 | } | ||
779 | |||
780 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | ||
781 | unsigned long r6, unsigned long r7, | ||
782 | struct pt_regs __regs) | ||
783 | { | ||
784 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
785 | long ex; | ||
786 | |||
787 | lookup_exception_vector(ex); | ||
788 | die_if_kernel("exception", regs, ex); | ||
789 | } | ||
790 | |||
791 | #if defined(CONFIG_SH_STANDARD_BIOS) | ||
792 | void *gdb_vbr_vector; | ||
793 | |||
794 | static inline void __init gdb_vbr_init(void) | ||
795 | { | ||
796 | register unsigned long vbr; | ||
797 | |||
798 | /* | ||
799 | * Read the old value of the VBR register to initialise | ||
800 | * the vector through which debug and BIOS traps are | ||
801 | * delegated by the Linux trap handler. | ||
802 | */ | ||
803 | asm volatile("stc vbr, %0" : "=r" (vbr)); | ||
804 | |||
805 | gdb_vbr_vector = (void *)(vbr + 0x100); | ||
806 | printk("Setting GDB trap vector to 0x%08lx\n", | ||
807 | (unsigned long)gdb_vbr_vector); | ||
808 | } | ||
809 | #endif | ||
810 | |||
811 | void __cpuinit per_cpu_trap_init(void) | ||
812 | { | ||
813 | extern void *vbr_base; | ||
814 | |||
815 | #ifdef CONFIG_SH_STANDARD_BIOS | ||
816 | if (raw_smp_processor_id() == 0) | ||
817 | gdb_vbr_init(); | ||
818 | #endif | ||
819 | |||
820 | /* NOTE: The VBR value should be at P1 | ||
821 | (or P2, virtural "fixed" address space). | ||
822 | It's definitely should not in physical address. */ | ||
823 | |||
824 | asm volatile("ldc %0, vbr" | ||
825 | : /* no output */ | ||
826 | : "r" (&vbr_base) | ||
827 | : "memory"); | ||
828 | } | ||
829 | |||
830 | void *set_exception_table_vec(unsigned int vec, void *handler) | ||
831 | { | ||
832 | extern void *exception_handling_table[]; | ||
833 | void *old_handler; | ||
834 | |||
835 | old_handler = exception_handling_table[vec]; | ||
836 | exception_handling_table[vec] = handler; | ||
837 | return old_handler; | ||
838 | } | ||
839 | |||
840 | extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5, | ||
841 | unsigned long r6, unsigned long r7, | ||
842 | struct pt_regs __regs); | ||
843 | |||
844 | void __init trap_init(void) | ||
845 | { | ||
846 | set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); | ||
847 | set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst); | ||
848 | |||
849 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ | ||
850 | defined(CONFIG_SH_FPU_EMU) | ||
851 | /* | ||
852 | * For SH-4 lacking an FPU, treat floating point instructions as | ||
853 | * reserved. They'll be handled in the math-emu case, or faulted on | ||
854 | * otherwise. | ||
855 | */ | ||
856 | set_exception_table_evt(0x800, do_reserved_inst); | ||
857 | set_exception_table_evt(0x820, do_illegal_slot_inst); | ||
858 | #elif defined(CONFIG_SH_FPU) | ||
859 | #ifdef CONFIG_CPU_SUBTYPE_SHX3 | ||
860 | set_exception_table_evt(0xd80, do_fpu_state_restore); | ||
861 | set_exception_table_evt(0xda0, do_fpu_state_restore); | ||
862 | #else | ||
863 | set_exception_table_evt(0x800, do_fpu_state_restore); | ||
864 | set_exception_table_evt(0x820, do_fpu_state_restore); | ||
865 | #endif | ||
866 | #endif | ||
867 | |||
868 | #ifdef CONFIG_CPU_SH2 | ||
869 | set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler); | ||
870 | #endif | ||
871 | #ifdef CONFIG_CPU_SH2A | ||
872 | set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); | ||
873 | set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); | ||
874 | #endif | ||
875 | |||
876 | /* Setup VBR for boot cpu */ | ||
877 | per_cpu_trap_init(); | ||
878 | } | ||
879 | 58 | ||
880 | #ifdef CONFIG_BUG | 59 | #ifdef CONFIG_BUG |
881 | void handle_BUG(struct pt_regs *regs) | 60 | if (__kernel_text_address(instruction_pointer(regs))) { |
882 | { | 61 | opcode_t insn = *(opcode_t *)instruction_pointer(regs); |
883 | enum bug_trap_type tt; | 62 | if (insn == TRAPA_BUG_OPCODE) |
884 | tt = report_bug(regs->pc, regs); | 63 | handle_BUG(regs); |
885 | if (tt == BUG_TRAP_TYPE_WARN) { | ||
886 | regs->pc += 2; | ||
887 | return; | ||
888 | } | 64 | } |
889 | |||
890 | die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); | ||
891 | } | ||
892 | |||
893 | int is_valid_bugaddr(unsigned long addr) | ||
894 | { | ||
895 | return addr >= PAGE_OFFSET; | ||
896 | } | ||
897 | #endif | ||
898 | |||
899 | void show_trace(struct task_struct *tsk, unsigned long *sp, | ||
900 | struct pt_regs *regs) | ||
901 | { | ||
902 | unsigned long addr; | ||
903 | |||
904 | if (regs && user_mode(regs)) | ||
905 | return; | ||
906 | |||
907 | printk("\nCall trace: "); | ||
908 | #ifdef CONFIG_KALLSYMS | ||
909 | printk("\n"); | ||
910 | #endif | 65 | #endif |
911 | 66 | ||
912 | while (!kstack_end(sp)) { | 67 | force_sig(SIGTRAP, current); |
913 | addr = *sp++; | ||
914 | if (kernel_text_address(addr)) | ||
915 | print_ip_sym(addr); | ||
916 | } | ||
917 | |||
918 | printk("\n"); | ||
919 | |||
920 | if (!tsk) | ||
921 | tsk = current; | ||
922 | |||
923 | debug_show_held_locks(tsk); | ||
924 | } | ||
925 | |||
926 | void show_stack(struct task_struct *tsk, unsigned long *sp) | ||
927 | { | ||
928 | unsigned long stack; | ||
929 | |||
930 | if (!tsk) | ||
931 | tsk = current; | ||
932 | if (tsk == current) | ||
933 | sp = (unsigned long *)current_stack_pointer; | ||
934 | else | ||
935 | sp = (unsigned long *)tsk->thread.sp; | ||
936 | |||
937 | stack = (unsigned long)sp; | ||
938 | dump_mem("Stack: ", stack, THREAD_SIZE + | ||
939 | (unsigned long)task_stack_page(tsk)); | ||
940 | show_trace(tsk, sp, NULL); | ||
941 | } | ||
942 | |||
943 | void dump_stack(void) | ||
944 | { | ||
945 | show_stack(NULL, NULL); | ||
946 | } | 68 | } |
947 | EXPORT_SYMBOL(dump_stack); | ||
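
A note on the macros the new file relies on: BUILD_TRAP_HANDLER() and TRAP_HANDLER_DECL are defined in the SH arch headers and do not appear in this diff. The sketch below is an assumption about how they expand on 32-bit SH, modelled on the explicit five-argument asmlinkage signature and RELOC_HIDE() sequence that the removed do_reserved_inst()/do_illegal_slot_inst() handlers spelled out by hand; the exact macro bodies and the use of regs->tra for the trap vector are not confirmed by this diff.

    /*
     * Hedged sketch only -- not part of this commit.  The real definitions
     * live in the SH arch headers; the expansion below and the regs->tra
     * field are assumptions.
     */
    #define BUILD_TRAP_HANDLER(name)					\
    asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
    				    unsigned long r6, unsigned long r7,	\
    				    struct pt_regs __regs)

    #define TRAP_HANDLER_DECL					\
    	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);		\
    	unsigned int vec = regs->tra;				\
    	(void)vec

    /*
     * With those in place, a handler such as BUILD_TRAP_HANDLER(debug) in
     * the new traps.c expands to the same shape as the handlers this
     * commit removes, roughly:
     *
     *	asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
     *					   unsigned long r6, unsigned long r7,
     *					   struct pt_regs __regs)
     *	{
     *		struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
     *		unsigned int vec = regs->tra;
     *		...
     *	}
     */

Packaging the register plumbing this way is what lets the generic handlers kept in traps.c shrink to a few lines each, while the CPU-specific unaligned-access and address-error code moves out of this file.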