author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d	/arch/sh/kernel/traps.c
refs		Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/sh/kernel/traps.c')
 -rw-r--r--	arch/sh/kernel/traps.c	712
 1 file changed, 712 insertions, 0 deletions
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
new file mode 100644
index 000000000000..7eb06719d844
--- /dev/null
+++ b/arch/sh/kernel/traps.c
@@ -0,0 +1,712 @@
/* $Id: traps.c,v 1.17 2004/05/02 01:46:30 sugioka Exp $
 *
 *  linux/arch/sh/traps.c
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002, 2003 Paul Mundt
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/sections.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs)			\
{						\
  if ((kgdb_debug_hook != (kgdb_debug_hook_t *) NULL) && (!user_mode(regs))) \
  {						\
    (*kgdb_debug_hook)(regs);			\
  }						\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

#define DO_ERROR(trapnr, signr, str, name, tsk)				\
asmlinkage void do_##name(unsigned long r4, unsigned long r5,		\
			  unsigned long r6, unsigned long r7,		\
			  struct pt_regs regs)				\
{									\
	unsigned long error_code;					\
									\
	/* Check if it's a DSP instruction */				\
	if (is_dsp_inst(&regs)) {					\
		/* Enable DSP mode, and restart instruction. */		\
		regs.sr |= SR_DSP;					\
		return;							\
	}								\
									\
	asm volatile("stc r2_bank, %0": "=r" (error_code));		\
	local_irq_enable();						\
	tsk->thread.error_code = error_code;				\
	tsk->thread.trap_no = trapnr;					\
	CHK_REMOTE_DEBUG(&regs);					\
	force_sig(signr, tsk);						\
	die_if_no_fixup(str,&regs,error_code);				\
}

#ifdef CONFIG_CPU_SH2
#define TRAP_RESERVED_INST	4
#define TRAP_ILLEGAL_SLOT_INST	6
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

/*
 * These constants are for searching for possible module text
 * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)

spinlock_t die_lock;

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	CHK_REMOTE_DEBUG(regs);
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static int handle_unaligned_notify_count = 10;

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return 0;
		}
		die(str, regs, err);
	}
	return -EFAULT;
}
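
/*
 * Illustrative annotation (not part of the committed code): the fixup
 * path above relies on the kernel exception table.  Each __ex_table
 * entry pairs the address of an instruction that is allowed to fault
 * with the address to resume at, conceptually:
 *
 *	struct exception_table_entry {
 *		unsigned long insn, fixup;
 *	};
 *
 * search_exception_tables(regs->pc) looks up the entry whose insn
 * matches the faulting PC, and die_if_no_fixup() then resumes at
 * fixup->fixup instead of calling die().
 */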

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	count = 1<<(instruction&3);

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			src = (unsigned char*) *rm;
			src += regs->regs[0];
			dst = (unsigned char*) rn;
			*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
			if (copy_from_user(dst, src, count))
				goto fetch_fault;

			if ((count == 2) && dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			dst += 4-count;

			if (__copy_user(dst, src, count))
				goto fetch_fault;

			if ((count == 2) && dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
		} else {
			/* to memory */
			src = (unsigned char*) rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dst = (unsigned char*) *rn;
			dst += regs->regs[0];

			if (copy_to_user(dst, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
		dst += (instruction&0x000F)<<2;

		if (copy_to_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (copy_to_user(dst, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		src = (unsigned char*) *rm;
		src += (instruction&0x000F)<<2;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

		if (copy_from_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6: /* mov.[bwl] from memory, possibly with post-increment */
		src = (unsigned char*) *rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[1] & 0x80) {
			dst[2] = 0xff;
			dst[3] = 0xff;
		}
#else
		dst += 4-count;

		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[2] & 0x80) {
			dst[0] = 0xff;
			dst[1] = 0xff;
		}
#endif
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char*) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dst = (unsigned char*) *rm; /* called Rn in the spec */
			dst += (instruction&0x000F)<<1;

			if (copy_to_user(dst, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			src = (unsigned char*) *rm;
			src += (instruction&0x000F)<<1;
			dst = (unsigned char*) &regs->regs[0];
			*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif

			if (copy_from_user(dst, src, 2))
				goto fetch_fault;

#ifdef __LITTLE_ENDIAN__
			if (dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			if (dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}
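
/*
 * Illustrative note (not part of the original file): the dst[1]/dst[2]
 * checks above reproduce the sign extension that mov.w performs in
 * hardware.  For example, emulating a 16-bit load of the value 0x8001
 * on a little-endian kernel leaves dst[0]=0x01, dst[1]=0x80; since
 * dst[1] & 0x80 is set, dst[2] and dst[3] are forced to 0xff and the
 * destination register ends up as 0xffff8001 rather than 0x00008001.
 */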

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
	u16 instruction;

	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
	}

	return handle_unaligned_ins(instruction,regs);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
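
/*
 * Worked example (annotation, not in the original file): for
 * "bra label" encoded as 0xA002 the 12-bit displacement is +2;
 * 0xA002<<4 = 0xA0020, whose low 16 bits 0x0020 sign-extend to 32,
 * 32>>3 = 4 (turning disp*16 into disp*2), and +4 gives an offset of
 * 8, i.e. PC+8.  For 0xAFFE the displacement is -2: the low 16 bits
 * 0xFFE0 sign-extend to -32, giving (-32>>3)+4 = 0, so the branch
 * target is the address of the branch itself.
 */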

static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf lab - no delayslot*/
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}

/*
 * Handle various address error exceptions
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code;
	mm_segment_t oldfs;
	u16 instruction;
	int tmp;

	asm volatile("stc r2_bank,%0": "=r" (error_code));

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = (writeaccess) ? 8 : 7;

		/* bad PC is not something we can fix */
		if (regs->pc & 1)
			goto uspace_segv;

		set_fs(USER_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		handle_unaligned_access(instruction, regs);
		set_fs(oldfs);
	}
}

#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst;

	/*
	 * Safe guard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

DO_ERROR(TRAP_RESERVED_INST, SIGILL, "reserved instruction", reserved_inst, current)
DO_ERROR(TRAP_ILLEGAL_SLOT_INST, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
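
/*
 * Illustrative expansion (annotation, not in the original file): each
 * DO_ERROR() line above instantiates a handler via the macro defined
 * near the top of this file, e.g. the first one produces roughly
 *
 *	asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 *					 unsigned long r6, unsigned long r7,
 *					 struct pt_regs regs)
 *	{ ... reads r2_bank, sets trap_no, sends SIGILL ... }
 *
 * and it is these do_reserved_inst()/do_illegal_slot_inst() symbols
 * that trap_init() installs into exception_handling_table[] below.
 */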

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs regs)
{
	long ex;
	asm volatile("stc r2_bank, %0" : "=r" (ex));
	die_if_kernel("exception", &regs, ex);
}

#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif

void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtural "fixed" address space).
	   It's definitely should not in physical address.  */

	asm volatile("ldc %0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}
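
/*
 * Annotation (not in the original file): on SH-3/SH-4, P1 is the
 * cached, untranslated kernel segment at 0x80000000-0x9fffffff and P2
 * is its uncached alias at 0xa0000000-0xbfffffff.  The kernel image,
 * and with it vbr_base, is linked into P1, which is why taking
 * &vbr_base here satisfies the NOTE above about not using a physical
 * address.
 */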

void __init trap_init(void)
{
	extern void *exception_handling_table[];

	exception_handling_table[TRAP_RESERVED_INST]
		= (void *)do_reserved_inst;
	exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
		= (void *)do_illegal_slot_inst;

#ifdef CONFIG_CPU_SH4
	if (!(cpu_data->flags & CPU_HAS_FPU)) {
		/* For SH-4 lacking an FPU, treat floating point instructions
		   as reserved. */
		/* entry 64 corresponds to EXPEVT=0x800 */
		exception_handling_table[64] = (void *)do_reserved_inst;
		exception_handling_table[65] = (void *)do_illegal_slot_inst;
	}
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *stack, addr;
	unsigned long module_start = VMALLOC_START;
	unsigned long module_end = VMALLOC_END;
	int i = 1;

	if (tsk && !sp) {
		sp = (unsigned long *)tsk->thread.sp;
	}

	if (!sp) {
		__asm__ __volatile__ (
			"mov r15, %0\n\t"
			"stc r7_bank, %1\n\t"
			: "=r" (module_start),
			  "=r" (module_end)
		);

		sp = (unsigned long *)module_start;
	}

	stack = sp;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	while (!kstack_end(stack)) {
		addr = *stack++;
		if (((addr >= (unsigned long)_text) &&
		     (addr <= (unsigned long)_etext)) ||
		    ((addr >= module_start) && (addr <= module_end))) {
			/*
			 * For 80-columns display, 6 entry is maximum.
			 * NOTE: '[<8c00abcd>] ' consumes 13 columns .
			 */
#ifndef CONFIG_KALLSYMS
			if (i && ((i % 6) == 0))
				printk("\n ");
#endif
			printk("[<%08lx>] ", addr);
			print_symbol("%s\n", addr);
			i++;
		}
	}

	printk("\n");
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);