Diffstat (limited to 'arch/sh/kernel/traps.c')
-rw-r--r-- | arch/sh/kernel/traps.c | 396 |
1 file changed, 273 insertions, 123 deletions
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index c2c597e09482..ec110157992d 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,43 +1,32 @@ | |||
1 | /* $Id: traps.c,v 1.17 2004/05/02 01:46:30 sugioka Exp $ | 1 | /* |
2 | * | 2 | * 'traps.c' handles hardware traps and faults after we have saved some |
3 | * linux/arch/sh/traps.c | 3 | * state in 'entry.S'. |
4 | * | 4 | * |
5 | * SuperH version: Copyright (C) 1999 Niibe Yutaka | 5 | * SuperH version: Copyright (C) 1999 Niibe Yutaka |
6 | * Copyright (C) 2000 Philipp Rumpf | 6 | * Copyright (C) 2000 Philipp Rumpf |
7 | * Copyright (C) 2000 David Howells | 7 | * Copyright (C) 2000 David Howells |
8 | * Copyright (C) 2002, 2003 Paul Mundt | 8 | * Copyright (C) 2002 - 2006 Paul Mundt |
9 | */ | 9 | * |
10 | 10 | * This file is subject to the terms and conditions of the GNU General Public | |
11 | /* | 11 | * License. See the file "COPYING" in the main directory of this archive |
12 | * 'Traps.c' handles hardware traps and faults after we have saved some | 12 | * for more details. |
13 | * state in 'entry.S'. | ||
14 | */ | 13 | */ |
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
17 | #include <linux/string.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
20 | #include <linux/timer.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/init.h> | 16 | #include <linux/init.h> |
25 | #include <linux/delay.h> | ||
26 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
27 | #include <linux/module.h> | 18 | #include <linux/module.h> |
28 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
29 | 20 | #include <linux/io.h> | |
21 | #include <linux/debug_locks.h> | ||
22 | #include <linux/limits.h> | ||
30 | #include <asm/system.h> | 23 | #include <asm/system.h> |
31 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
32 | #include <asm/io.h> | ||
33 | #include <asm/atomic.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/sections.h> | ||
36 | 25 | ||
37 | #ifdef CONFIG_SH_KGDB | 26 | #ifdef CONFIG_SH_KGDB |
38 | #include <asm/kgdb.h> | 27 | #include <asm/kgdb.h> |
39 | #define CHK_REMOTE_DEBUG(regs) \ | 28 | #define CHK_REMOTE_DEBUG(regs) \ |
40 | { \ | 29 | { \ |
41 | if (kgdb_debug_hook && !user_mode(regs))\ | 30 | if (kgdb_debug_hook && !user_mode(regs))\ |
42 | (*kgdb_debug_hook)(regs); \ | 31 | (*kgdb_debug_hook)(regs); \ |
43 | } | 32 | } |
@@ -46,20 +35,44 @@ | |||
46 | #endif | 35 | #endif |
47 | 36 | ||
48 | #ifdef CONFIG_CPU_SH2 | 37 | #ifdef CONFIG_CPU_SH2 |
49 | #define TRAP_RESERVED_INST 4 | 38 | # define TRAP_RESERVED_INST 4 |
50 | #define TRAP_ILLEGAL_SLOT_INST 6 | 39 | # define TRAP_ILLEGAL_SLOT_INST 6 |
40 | # define TRAP_ADDRESS_ERROR 9 | ||
41 | # ifdef CONFIG_CPU_SH2A | ||
42 | # define TRAP_DIVZERO_ERROR 17 | ||
43 | # define TRAP_DIVOVF_ERROR 18 | ||
44 | # endif | ||
51 | #else | 45 | #else |
52 | #define TRAP_RESERVED_INST 12 | 46 | #define TRAP_RESERVED_INST 12 |
53 | #define TRAP_ILLEGAL_SLOT_INST 13 | 47 | #define TRAP_ILLEGAL_SLOT_INST 13 |
54 | #endif | 48 | #endif |
55 | 49 | ||
56 | /* | 50 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) |
57 | * These constants are for searching for possible module text | 51 | { |
58 | * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is | 52 | unsigned long p; |
59 | * a guess of how much space is likely to be vmalloced. | 53 | int i; |
60 | */ | 54 | |
61 | #define VMALLOC_OFFSET (8*1024*1024) | 55 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); |
62 | #define MODULE_RANGE (8*1024*1024) | 56 | |
57 | for (p = bottom & ~31; p < top; ) { | ||
58 | printk("%04lx: ", p & 0xffff); | ||
59 | |||
60 | for (i = 0; i < 8; i++, p += 4) { | ||
61 | unsigned int val; | ||
62 | |||
63 | if (p < bottom || p >= top) | ||
64 | printk(" "); | ||
65 | else { | ||
66 | if (__get_user(val, (unsigned int __user *)p)) { | ||
67 | printk("\n"); | ||
68 | return; | ||
69 | } | ||
70 | printk("%08x ", val); | ||
71 | } | ||
72 | } | ||
73 | printk("\n"); | ||
74 | } | ||
75 | } | ||
63 | 76 | ||
64 | DEFINE_SPINLOCK(die_lock); | 77 | DEFINE_SPINLOCK(die_lock); |
65 | 78 | ||
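This hunk drops the old VMALLOC_OFFSET/MODULE_RANGE guesswork and adds dump_mem(), which prints the stack as rows of eight 32-bit words starting on 32-byte boundaries, blank-padding words that fall outside the requested range. A minimal user-space sketch of that layout (hypothetical code over a local buffer, not the kernel routine):

#include <stdio.h>
#include <stdint.h>

/* Mimics dump_mem()'s row layout over a local buffer instead of a task stack. */
static void dump_mem_sketch(const char *str, uintptr_t bottom, uintptr_t top)
{
	uintptr_t p;
	int i;

	printf("%s(0x%08lx to 0x%08lx)\n", str,
	       (unsigned long)bottom, (unsigned long)top);

	for (p = bottom & ~(uintptr_t)31; p < top; ) {
		printf("%04lx: ", (unsigned long)(p & 0xffff));

		for (i = 0; i < 8; i++, p += 4) {
			if (p < bottom || p >= top)
				printf("         ");	/* pad words outside the range */
			else
				printf("%08x ", *(const uint32_t *)p);
		}
		printf("\n");
	}
}

int main(void)
{
	uint32_t buf[16];
	int i;

	for (i = 0; i < 16; i++)
		buf[i] = 0x01010101u * i;

	dump_mem_sketch("Stack: ", (uintptr_t)&buf[2], (uintptr_t)&buf[14]);
	return 0;
}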
@@ -69,21 +82,33 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
69 | 82 | ||
70 | console_verbose(); | 83 | console_verbose(); |
71 | spin_lock_irq(&die_lock); | 84 | spin_lock_irq(&die_lock); |
85 | bust_spinlocks(1); | ||
86 | |||
72 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | 87 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); |
88 | |||
73 | CHK_REMOTE_DEBUG(regs); | 89 | CHK_REMOTE_DEBUG(regs); |
90 | print_modules(); | ||
74 | show_regs(regs); | 91 | show_regs(regs); |
92 | |||
93 | printk("Process: %s (pid: %d, stack limit = %p)\n", | ||
94 | current->comm, current->pid, task_stack_page(current) + 1); | ||
95 | |||
96 | if (!user_mode(regs) || in_interrupt()) | ||
97 | dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + | ||
98 | (unsigned long)task_stack_page(current)); | ||
99 | |||
100 | bust_spinlocks(0); | ||
75 | spin_unlock_irq(&die_lock); | 101 | spin_unlock_irq(&die_lock); |
76 | do_exit(SIGSEGV); | 102 | do_exit(SIGSEGV); |
77 | } | 103 | } |
78 | 104 | ||
79 | static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) | 105 | static inline void die_if_kernel(const char *str, struct pt_regs *regs, |
106 | long err) | ||
80 | { | 107 | { |
81 | if (!user_mode(regs)) | 108 | if (!user_mode(regs)) |
82 | die(str, regs, err); | 109 | die(str, regs, err); |
83 | } | 110 | } |
84 | 111 | ||
85 | static int handle_unaligned_notify_count = 10; | ||
86 | |||
87 | /* | 112 | /* |
88 | * try and fix up kernelspace address errors | 113 | * try and fix up kernelspace address errors |
89 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV | 114 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV |
@@ -93,8 +118,7 @@ static int handle_unaligned_notify_count = 10; | |||
93 | */ | 118 | */ |
94 | static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | 119 | static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) |
95 | { | 120 | { |
96 | if (!user_mode(regs)) | 121 | if (!user_mode(regs)) { |
97 | { | ||
98 | const struct exception_table_entry *fixup; | 122 | const struct exception_table_entry *fixup; |
99 | fixup = search_exception_tables(regs->pc); | 123 | fixup = search_exception_tables(regs->pc); |
100 | if (fixup) { | 124 | if (fixup) { |
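For context on the fixup path above: search_exception_tables() looks the faulting PC up in a table of (instruction, fixup) address pairs emitted by the uaccess macros, and die_if_no_fixup() only dies when no entry matches. A host-side sketch of that lookup, with field names assumed from the generic kernel layout rather than anything in this patch:

#include <stddef.h>

struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to continue at if it does */
};

/* Linear scan for clarity; the kernel keeps the table sorted and bisects it. */
static const struct exception_table_entry *
search_table_sketch(const struct exception_table_entry *tbl, size_t n,
		    unsigned long pc)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == pc)
			return &tbl[i];

	return NULL;
}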
@@ -106,6 +130,40 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | |||
106 | return -EFAULT; | 130 | return -EFAULT; |
107 | } | 131 | } |
108 | 132 | ||
133 | #ifdef CONFIG_BUG | ||
134 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
135 | static inline void do_bug_verbose(struct pt_regs *regs) | ||
136 | { | ||
137 | struct bug_frame f; | ||
138 | long len; | ||
139 | |||
140 | if (__copy_from_user(&f, (const void __user *)regs->pc, | ||
141 | sizeof(struct bug_frame))) | ||
142 | return; | ||
143 | |||
144 | len = __strnlen_user(f.file, PATH_MAX) - 1; | ||
145 | if (unlikely(len < 0 || len >= PATH_MAX)) | ||
146 | f.file = "<bad filename>"; | ||
147 | len = __strnlen_user(f.func, PATH_MAX) - 1; | ||
148 | if (unlikely(len < 0 || len >= PATH_MAX)) | ||
149 | f.func = "<bad function>"; | ||
150 | |||
151 | printk(KERN_ALERT "kernel BUG in %s() at %s:%d!\n", | ||
152 | f.func, f.file, f.line); | ||
153 | } | ||
154 | #else | ||
155 | static inline void do_bug_verbose(struct pt_regs *regs) | ||
156 | { | ||
157 | } | ||
158 | #endif /* CONFIG_DEBUG_BUGVERBOSE */ | ||
159 | #endif /* CONFIG_BUG */ | ||
160 | |||
161 | void handle_BUG(struct pt_regs *regs) | ||
162 | { | ||
163 | do_bug_verbose(regs); | ||
164 | die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); | ||
165 | } | ||
166 | |||
109 | /* | 167 | /* |
110 | * handle an instruction that does an unaligned memory access by emulating the | 168 | * handle an instruction that does an unaligned memory access by emulating the |
111 | * desired behaviour | 169 | * desired behaviour |
@@ -179,7 +237,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
179 | if (copy_to_user(dst,src,4)) | 237 | if (copy_to_user(dst,src,4)) |
180 | goto fetch_fault; | 238 | goto fetch_fault; |
181 | ret = 0; | 239 | ret = 0; |
182 | break; | 240 | break; |
183 | 241 | ||
184 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ | 242 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ |
185 | if (instruction & 4) | 243 | if (instruction & 4) |
@@ -203,7 +261,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
203 | if (copy_from_user(dst,src,4)) | 261 | if (copy_from_user(dst,src,4)) |
204 | goto fetch_fault; | 262 | goto fetch_fault; |
205 | ret = 0; | 263 | ret = 0; |
206 | break; | 264 | break; |
207 | 265 | ||
208 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ | 266 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ |
209 | src = (unsigned char*) *rm; | 267 | src = (unsigned char*) *rm; |
@@ -211,7 +269,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
211 | *rm += count; | 269 | *rm += count; |
212 | dst = (unsigned char*) rn; | 270 | dst = (unsigned char*) rn; |
213 | *(unsigned long*)dst = 0; | 271 | *(unsigned long*)dst = 0; |
214 | 272 | ||
215 | #ifdef __LITTLE_ENDIAN__ | 273 | #ifdef __LITTLE_ENDIAN__ |
216 | if (copy_from_user(dst, src, count)) | 274 | if (copy_from_user(dst, src, count)) |
217 | goto fetch_fault; | 275 | goto fetch_fault; |
@@ -222,7 +280,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
222 | } | 280 | } |
223 | #else | 281 | #else |
224 | dst += 4-count; | 282 | dst += 4-count; |
225 | 283 | ||
226 | if (copy_from_user(dst, src, count)) | 284 | if (copy_from_user(dst, src, count)) |
227 | goto fetch_fault; | 285 | goto fetch_fault; |
228 | 286 | ||
@@ -301,7 +359,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) | |||
301 | return -EFAULT; | 359 | return -EFAULT; |
302 | 360 | ||
303 | /* kernel */ | 361 | /* kernel */ |
304 | die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0); | 362 | die("delay-slot-insn faulting in handle_unaligned_delayslot", |
363 | regs, 0); | ||
305 | } | 364 | } |
306 | 365 | ||
307 | return handle_unaligned_ins(instruction,regs); | 366 | return handle_unaligned_ins(instruction,regs); |
@@ -323,6 +382,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) | |||
323 | #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) | 382 | #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) |
324 | #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) | 383 | #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) |
325 | 384 | ||
385 | /* | ||
386 | * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit | ||
387 | * opcodes.. | ||
388 | */ | ||
389 | #ifndef CONFIG_CPU_SH2A | ||
390 | static int handle_unaligned_notify_count = 10; | ||
391 | |||
326 | static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | 392 | static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) |
327 | { | 393 | { |
328 | u_int rm; | 394 | u_int rm; |
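The SH_PC_8BIT_OFFSET()/SH_PC_12BIT_OFFSET() macros above turn a branch displacement into a PC-relative byte offset: the field is sign-extended, doubled (SH instructions are two bytes), and biased by 4 because branches are taken relative to PC + 4. A small host-side check using example opcodes (a bf and a bra with negative displacements):

#include <stdio.h>

#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr)) * 2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr) << 4)) >> 3) + 4)

int main(void)
{
	unsigned short bf_back2  = 0x8bfe;	/* bf with an 8-bit displacement of -2 */
	unsigned short bra_back3 = 0xaffd;	/* bra with a 12-bit displacement of -3 */

	/* (-2) * 2 + 4 = 0: the branch target is the faulting PC itself */
	printf("8-bit offset : %d\n", SH_PC_8BIT_OFFSET(bf_back2));

	/* sign-extend -3, double, add 4: -2, i.e. one instruction back */
	printf("12-bit offset: %d\n", SH_PC_12BIT_OFFSET(bra_back3));

	return 0;
}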
@@ -335,7 +401,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | |||
335 | if (user_mode(regs) && handle_unaligned_notify_count>0) { | 401 | if (user_mode(regs) && handle_unaligned_notify_count>0) { |
336 | handle_unaligned_notify_count--; | 402 | handle_unaligned_notify_count--; |
337 | 403 | ||
338 | printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | 404 | printk(KERN_NOTICE "Fixing up unaligned userspace access " |
405 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
339 | current->comm,current->pid,(u16*)regs->pc,instruction); | 406 | current->comm,current->pid,(u16*)regs->pc,instruction); |
340 | } | 407 | } |
341 | 408 | ||
@@ -459,32 +526,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | |||
459 | regs->pc += 2; | 526 | regs->pc += 2; |
460 | return ret; | 527 | return ret; |
461 | } | 528 | } |
529 | #endif /* CONFIG_CPU_SH2A */ | ||
530 | |||
531 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
532 | #define lookup_exception_vector(x) \ | ||
533 | __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) | ||
534 | #else | ||
535 | #define lookup_exception_vector(x) \ | ||
536 | __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) | ||
537 | #endif | ||
462 | 538 | ||
463 | /* | 539 | /* |
464 | * Handle various address error exceptions | 540 | * Handle various address error exceptions: |
541 | * - instruction address error: | ||
542 | * misaligned PC | ||
543 | * PC >= 0x80000000 in user mode | ||
544 | * - data address error (read and write) | ||
545 | * misaligned data access | ||
546 | * access to >= 0x80000000 in user mode | ||
547 | * Unfortunately we can't distinguish between instruction address error | ||
548 | * and data address errors caused by read accesses. | ||
465 | */ | 549 | */ |
466 | asmlinkage void do_address_error(struct pt_regs *regs, | 550 | asmlinkage void do_address_error(struct pt_regs *regs, |
467 | unsigned long writeaccess, | 551 | unsigned long writeaccess, |
468 | unsigned long address) | 552 | unsigned long address) |
469 | { | 553 | { |
470 | unsigned long error_code; | 554 | unsigned long error_code = 0; |
471 | mm_segment_t oldfs; | 555 | mm_segment_t oldfs; |
556 | siginfo_t info; | ||
557 | #ifndef CONFIG_CPU_SH2A | ||
472 | u16 instruction; | 558 | u16 instruction; |
473 | int tmp; | 559 | int tmp; |
560 | #endif | ||
474 | 561 | ||
475 | asm volatile("stc r2_bank,%0": "=r" (error_code)); | 562 | /* Intentional ifdef */ |
563 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
564 | lookup_exception_vector(error_code); | ||
565 | #endif | ||
476 | 566 | ||
477 | oldfs = get_fs(); | 567 | oldfs = get_fs(); |
478 | 568 | ||
479 | if (user_mode(regs)) { | 569 | if (user_mode(regs)) { |
570 | int si_code = BUS_ADRERR; | ||
571 | |||
480 | local_irq_enable(); | 572 | local_irq_enable(); |
481 | current->thread.error_code = error_code; | ||
482 | current->thread.trap_no = (writeaccess) ? 8 : 7; | ||
483 | 573 | ||
484 | /* bad PC is not something we can fix */ | 574 | /* bad PC is not something we can fix */ |
485 | if (regs->pc & 1) | 575 | if (regs->pc & 1) { |
576 | si_code = BUS_ADRALN; | ||
486 | goto uspace_segv; | 577 | goto uspace_segv; |
578 | } | ||
487 | 579 | ||
580 | #ifndef CONFIG_CPU_SH2A | ||
488 | set_fs(USER_DS); | 581 | set_fs(USER_DS); |
489 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | 582 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { |
490 | /* Argh. Fault on the instruction itself. | 583 | /* Argh. Fault on the instruction itself. |
@@ -499,14 +592,23 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
499 | 592 | ||
500 | if (tmp==0) | 593 | if (tmp==0) |
501 | return; /* sorted */ | 594 | return; /* sorted */ |
595 | #endif | ||
502 | 596 | ||
503 | uspace_segv: | 597 | uspace_segv: |
504 | printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm); | 598 | printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " |
505 | force_sig(SIGSEGV, current); | 599 | "access (PC %lx PR %lx)\n", current->comm, regs->pc, |
600 | regs->pr); | ||
601 | |||
602 | info.si_signo = SIGBUS; | ||
603 | info.si_errno = 0; | ||
604 | info.si_code = si_code; | ||
605 | info.si_addr = (void *) address; | ||
606 | force_sig_info(SIGBUS, &info, current); | ||
506 | } else { | 607 | } else { |
507 | if (regs->pc & 1) | 608 | if (regs->pc & 1) |
508 | die("unaligned program counter", regs, error_code); | 609 | die("unaligned program counter", regs, error_code); |
509 | 610 | ||
611 | #ifndef CONFIG_CPU_SH2A | ||
510 | set_fs(KERNEL_DS); | 612 | set_fs(KERNEL_DS); |
511 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | 613 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { |
512 | /* Argh. Fault on the instruction itself. | 614 | /* Argh. Fault on the instruction itself. |
@@ -518,6 +620,12 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
518 | 620 | ||
519 | handle_unaligned_access(instruction, regs); | 621 | handle_unaligned_access(instruction, regs); |
520 | set_fs(oldfs); | 622 | set_fs(oldfs); |
623 | #else | ||
624 | printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " | ||
625 | "access\n", current->comm); | ||
626 | |||
627 | force_sig(SIGSEGV, current); | ||
628 | #endif | ||
521 | } | 629 | } |
522 | } | 630 | } |
523 | 631 | ||
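A hypothetical user-space reproducer for the paths above: a misaligned 32-bit load raises an address error, which handle_unaligned_access() quietly emulates on SH-3/4 (logging the first ten occurrences), while the new SH-2A user path answers with SIGBUS instead:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = { 0 };
	unsigned int v = 0x12345678;
	volatile unsigned int *p;

	memcpy(buf + 1, &v, sizeof(v));		/* byte-wise copy, always safe */

	/* Deliberately misaligned 32-bit load: trapped by do_address_error()
	 * on SH and either fixed up or signalled; the volatile pointer keeps
	 * the compiler from folding the access away. */
	p = (volatile unsigned int *)(buf + 1);
	printf("read back: 0x%08x\n", *p);

	return 0;
}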
@@ -529,7 +637,7 @@ int is_dsp_inst(struct pt_regs *regs) | |||
529 | { | 637 | { |
530 | unsigned short inst; | 638 | unsigned short inst; |
531 | 639 | ||
532 | /* | 640 | /* |
533 | * Safe guard if DSP mode is already enabled or we're lacking | 641 | * Safe guard if DSP mode is already enabled or we're lacking |
534 | * the DSP altogether. | 642 | * the DSP altogether. |
535 | */ | 643 | */ |
@@ -550,24 +658,49 @@ int is_dsp_inst(struct pt_regs *regs) | |||
550 | #define is_dsp_inst(regs) (0) | 658 | #define is_dsp_inst(regs) (0) |
551 | #endif /* CONFIG_SH_DSP */ | 659 | #endif /* CONFIG_SH_DSP */ |
552 | 660 | ||
553 | extern int do_fpu_inst(unsigned short, struct pt_regs*); | 661 | #ifdef CONFIG_CPU_SH2A |
662 | asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | ||
663 | unsigned long r6, unsigned long r7, | ||
664 | struct pt_regs __regs) | ||
665 | { | ||
666 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
667 | siginfo_t info; | ||
668 | |||
669 | switch (r4) { | ||
670 | case TRAP_DIVZERO_ERROR: | ||
671 | info.si_code = FPE_INTDIV; | ||
672 | break; | ||
673 | case TRAP_DIVOVF_ERROR: | ||
674 | info.si_code = FPE_INTOVF; | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | force_sig_info(SIGFPE, &info, current); | ||
679 | } | ||
680 | #endif | ||
681 | |||
682 | /* arch/sh/kernel/cpu/sh4/fpu.c */ | ||
683 | extern int do_fpu_inst(unsigned short, struct pt_regs *); | ||
684 | extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, | ||
685 | unsigned long r6, unsigned long r7, struct pt_regs __regs); | ||
554 | 686 | ||
555 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | 687 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, |
556 | unsigned long r6, unsigned long r7, | 688 | unsigned long r6, unsigned long r7, |
557 | struct pt_regs regs) | 689 | struct pt_regs __regs) |
558 | { | 690 | { |
691 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
559 | unsigned long error_code; | 692 | unsigned long error_code; |
560 | struct task_struct *tsk = current; | 693 | struct task_struct *tsk = current; |
561 | 694 | ||
562 | #ifdef CONFIG_SH_FPU_EMU | 695 | #ifdef CONFIG_SH_FPU_EMU |
563 | unsigned short inst; | 696 | unsigned short inst = 0; |
564 | int err; | 697 | int err; |
565 | 698 | ||
566 | get_user(inst, (unsigned short*)regs.pc); | 699 | get_user(inst, (unsigned short*)regs->pc); |
567 | 700 | ||
568 | err = do_fpu_inst(inst, ®s); | 701 | err = do_fpu_inst(inst, regs); |
569 | if (!err) { | 702 | if (!err) { |
570 | regs.pc += 2; | 703 | regs->pc += 2; |
571 | return; | 704 | return; |
572 | } | 705 | } |
573 | /* not a FPU inst. */ | 706 | /* not a FPU inst. */ |
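With do_divide_error() in place (and the SH-2A vectors registered in trap_init() further down), an integer divide-by-zero reaches user space as SIGFPE with si_code FPE_INTDIV, and the overflow case as FPE_INTOVF. A hypothetical user-space observer (printf() in a signal handler is tolerable for a demo only):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void fpe_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;

	printf("SIGFPE, si_code=%d (%s)\n", info->si_code,
	       info->si_code == FPE_INTDIV ? "FPE_INTDIV" :
	       info->si_code == FPE_INTOVF ? "FPE_INTOVF" : "other");
	exit(0);
}

int main(void)
{
	struct sigaction sa;
	volatile int zero = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGFPE, &sa, NULL);

	return 1 / zero;	/* arrives via TRAP_DIVZERO_ERROR on SH-2A */
}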
@@ -575,20 +708,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | |||
575 | 708 | ||
576 | #ifdef CONFIG_SH_DSP | 709 | #ifdef CONFIG_SH_DSP |
577 | /* Check if it's a DSP instruction */ | 710 | /* Check if it's a DSP instruction */ |
578 | if (is_dsp_inst(®s)) { | 711 | if (is_dsp_inst(regs)) { |
579 | /* Enable DSP mode, and restart instruction. */ | 712 | /* Enable DSP mode, and restart instruction. */ |
580 | regs.sr |= SR_DSP; | 713 | regs->sr |= SR_DSP; |
581 | return; | 714 | return; |
582 | } | 715 | } |
583 | #endif | 716 | #endif |
584 | 717 | ||
585 | asm volatile("stc r2_bank, %0": "=r" (error_code)); | 718 | lookup_exception_vector(error_code); |
719 | |||
586 | local_irq_enable(); | 720 | local_irq_enable(); |
587 | tsk->thread.error_code = error_code; | 721 | CHK_REMOTE_DEBUG(regs); |
588 | tsk->thread.trap_no = TRAP_RESERVED_INST; | ||
589 | CHK_REMOTE_DEBUG(®s); | ||
590 | force_sig(SIGILL, tsk); | 722 | force_sig(SIGILL, tsk); |
591 | die_if_no_fixup("reserved instruction", ®s, error_code); | 723 | die_if_no_fixup("reserved instruction", regs, error_code); |
592 | } | 724 | } |
593 | 725 | ||
594 | #ifdef CONFIG_SH_FPU_EMU | 726 | #ifdef CONFIG_SH_FPU_EMU |
@@ -636,39 +768,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs) | |||
636 | 768 | ||
637 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | 769 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, |
638 | unsigned long r6, unsigned long r7, | 770 | unsigned long r6, unsigned long r7, |
639 | struct pt_regs regs) | 771 | struct pt_regs __regs) |
640 | { | 772 | { |
773 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
641 | unsigned long error_code; | 774 | unsigned long error_code; |
642 | struct task_struct *tsk = current; | 775 | struct task_struct *tsk = current; |
643 | #ifdef CONFIG_SH_FPU_EMU | 776 | #ifdef CONFIG_SH_FPU_EMU |
644 | unsigned short inst; | 777 | unsigned short inst = 0; |
645 | 778 | ||
646 | get_user(inst, (unsigned short *)regs.pc + 1); | 779 | get_user(inst, (unsigned short *)regs->pc + 1); |
647 | if (!do_fpu_inst(inst, ®s)) { | 780 | if (!do_fpu_inst(inst, regs)) { |
648 | get_user(inst, (unsigned short *)regs.pc); | 781 | get_user(inst, (unsigned short *)regs->pc); |
649 | if (!emulate_branch(inst, ®s)) | 782 | if (!emulate_branch(inst, regs)) |
650 | return; | 783 | return; |
651 | /* fault in branch.*/ | 784 | /* fault in branch.*/ |
652 | } | 785 | } |
653 | /* not a FPU inst. */ | 786 | /* not a FPU inst. */ |
654 | #endif | 787 | #endif |
655 | 788 | ||
656 | asm volatile("stc r2_bank, %0": "=r" (error_code)); | 789 | lookup_exception_vector(error_code); |
790 | |||
657 | local_irq_enable(); | 791 | local_irq_enable(); |
658 | tsk->thread.error_code = error_code; | 792 | CHK_REMOTE_DEBUG(regs); |
659 | tsk->thread.trap_no = TRAP_RESERVED_INST; | ||
660 | CHK_REMOTE_DEBUG(®s); | ||
661 | force_sig(SIGILL, tsk); | 793 | force_sig(SIGILL, tsk); |
662 | die_if_no_fixup("illegal slot instruction", ®s, error_code); | 794 | die_if_no_fixup("illegal slot instruction", regs, error_code); |
663 | } | 795 | } |
664 | 796 | ||
665 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | 797 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, |
666 | unsigned long r6, unsigned long r7, | 798 | unsigned long r6, unsigned long r7, |
667 | struct pt_regs regs) | 799 | struct pt_regs __regs) |
668 | { | 800 | { |
801 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
669 | long ex; | 802 | long ex; |
670 | asm volatile("stc r2_bank, %0" : "=r" (ex)); | 803 | |
671 | die_if_kernel("exception", ®s, ex); | 804 | lookup_exception_vector(ex); |
805 | die_if_kernel("exception", regs, ex); | ||
672 | } | 806 | } |
673 | 807 | ||
674 | #if defined(CONFIG_SH_STANDARD_BIOS) | 808 | #if defined(CONFIG_SH_STANDARD_BIOS) |
@@ -709,14 +843,24 @@ void __init per_cpu_trap_init(void) | |||
709 | : "memory"); | 843 | : "memory"); |
710 | } | 844 | } |
711 | 845 | ||
712 | void __init trap_init(void) | 846 | void *set_exception_table_vec(unsigned int vec, void *handler) |
713 | { | 847 | { |
714 | extern void *exception_handling_table[]; | 848 | extern void *exception_handling_table[]; |
849 | void *old_handler; | ||
850 | |||
851 | old_handler = exception_handling_table[vec]; | ||
852 | exception_handling_table[vec] = handler; | ||
853 | return old_handler; | ||
854 | } | ||
715 | 855 | ||
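set_exception_table_vec() replaces the open-coded table stores that trap_init() used to do and hands back the displaced entry so a caller can restore it later. A host-side analogue of that replace-and-return-old pattern (hypothetical names, not kernel API):

#include <stdio.h>

typedef void (*handler_t)(void);

static void default_handler(void) { puts("default"); }
static void custom_handler(void)  { puts("custom"); }

static handler_t table[8];

/* Swap in a new handler and return the previous one, like set_exception_table_vec(). */
static handler_t set_vec(unsigned int vec, handler_t h)
{
	handler_t old = table[vec];

	table[vec] = h;
	return old;
}

int main(void)
{
	handler_t old;
	unsigned int i;

	for (i = 0; i < 8; i++)
		table[i] = default_handler;

	old = set_vec(4, custom_handler);
	table[4]();		/* "custom" */

	set_vec(4, old);	/* restore the saved entry */
	table[4]();		/* "default" */

	return 0;
}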
716 | exception_handling_table[TRAP_RESERVED_INST] | 856 | extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5, |
717 | = (void *)do_reserved_inst; | 857 | unsigned long r6, unsigned long r7, |
718 | exception_handling_table[TRAP_ILLEGAL_SLOT_INST] | 858 | struct pt_regs __regs); |
719 | = (void *)do_illegal_slot_inst; | 859 | |
860 | void __init trap_init(void) | ||
861 | { | ||
862 | set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); | ||
863 | set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst); | ||
720 | 864 | ||
721 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ | 865 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ |
722 | defined(CONFIG_SH_FPU_EMU) | 866 | defined(CONFIG_SH_FPU_EMU) |
@@ -725,61 +869,67 @@ void __init trap_init(void) | |||
725 | * reserved. They'll be handled in the math-emu case, or faulted on | 869 | * reserved. They'll be handled in the math-emu case, or faulted on |
726 | * otherwise. | 870 | * otherwise. |
727 | */ | 871 | */ |
728 | /* entry 64 corresponds to EXPEVT=0x800 */ | 872 | set_exception_table_evt(0x800, do_reserved_inst); |
729 | exception_handling_table[64] = (void *)do_reserved_inst; | 873 | set_exception_table_evt(0x820, do_illegal_slot_inst); |
730 | exception_handling_table[65] = (void *)do_illegal_slot_inst; | 874 | #elif defined(CONFIG_SH_FPU) |
875 | set_exception_table_evt(0x800, do_fpu_state_restore); | ||
876 | set_exception_table_evt(0x820, do_fpu_state_restore); | ||
731 | #endif | 877 | #endif |
732 | 878 | ||
879 | #ifdef CONFIG_CPU_SH2 | ||
880 | set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler); | ||
881 | #endif | ||
882 | #ifdef CONFIG_CPU_SH2A | ||
883 | set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); | ||
884 | set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); | ||
885 | #endif | ||
886 | |||
733 | /* Setup VBR for boot cpu */ | 887 | /* Setup VBR for boot cpu */ |
734 | per_cpu_trap_init(); | 888 | per_cpu_trap_init(); |
735 | } | 889 | } |
736 | 890 | ||
737 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 891 | void show_trace(struct task_struct *tsk, unsigned long *sp, |
892 | struct pt_regs *regs) | ||
738 | { | 893 | { |
739 | unsigned long *stack, addr; | 894 | unsigned long addr; |
740 | unsigned long module_start = VMALLOC_START; | ||
741 | unsigned long module_end = VMALLOC_END; | ||
742 | int i = 1; | ||
743 | 895 | ||
744 | if (!tsk) | 896 | if (regs && user_mode(regs)) |
745 | tsk = current; | 897 | return; |
746 | if (tsk == current) | ||
747 | sp = (unsigned long *)current_stack_pointer; | ||
748 | else | ||
749 | sp = (unsigned long *)tsk->thread.sp; | ||
750 | |||
751 | stack = sp; | ||
752 | 898 | ||
753 | printk("\nCall trace: "); | 899 | printk("\nCall trace: "); |
754 | #ifdef CONFIG_KALLSYMS | 900 | #ifdef CONFIG_KALLSYMS |
755 | printk("\n"); | 901 | printk("\n"); |
756 | #endif | 902 | #endif |
757 | 903 | ||
758 | while (!kstack_end(stack)) { | 904 | while (!kstack_end(sp)) { |
759 | addr = *stack++; | 905 | addr = *sp++; |
760 | if (((addr >= (unsigned long)_text) && | 906 | if (kernel_text_address(addr)) |
761 | (addr <= (unsigned long)_etext)) || | 907 | print_ip_sym(addr); |
762 | ((addr >= module_start) && (addr <= module_end))) { | ||
763 | /* | ||
764 | * For 80-columns display, 6 entry is maximum. | ||
765 | * NOTE: '[<8c00abcd>] ' consumes 13 columns . | ||
766 | */ | ||
767 | #ifndef CONFIG_KALLSYMS | ||
768 | if (i && ((i % 6) == 0)) | ||
769 | printk("\n "); | ||
770 | #endif | ||
771 | printk("[<%08lx>] ", addr); | ||
772 | print_symbol("%s\n", addr); | ||
773 | i++; | ||
774 | } | ||
775 | } | 908 | } |
776 | 909 | ||
777 | printk("\n"); | 910 | printk("\n"); |
911 | |||
912 | if (!tsk) | ||
913 | tsk = current; | ||
914 | |||
915 | debug_show_held_locks(tsk); | ||
778 | } | 916 | } |
779 | 917 | ||
780 | void show_task(unsigned long *sp) | 918 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
781 | { | 919 | { |
782 | show_stack(NULL, sp); | 920 | unsigned long stack; |
921 | |||
922 | if (!tsk) | ||
923 | tsk = current; | ||
924 | if (tsk == current) | ||
925 | sp = (unsigned long *)current_stack_pointer; | ||
926 | else | ||
927 | sp = (unsigned long *)tsk->thread.sp; | ||
928 | |||
929 | stack = (unsigned long)sp; | ||
930 | dump_mem("Stack: ", stack, THREAD_SIZE + | ||
931 | (unsigned long)task_stack_page(tsk)); | ||
932 | show_trace(tsk, sp, NULL); | ||
783 | } | 933 | } |
784 | 934 | ||
785 | void dump_stack(void) | 935 | void dump_stack(void) |