diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r-- | arch/mips/kernel/traps.c | 499 |
1 files changed, 399 insertions, 100 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a53b1ed7b386..6f3ff9690686 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999 Silicon Graphics, Inc. |
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | 11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. |
12 | * Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki | 12 | * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki |
13 | */ | 13 | */ |
14 | #include <linux/config.h> | 14 | #include <linux/config.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -20,12 +20,16 @@ | |||
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
23 | #include <linux/bootmem.h> | ||
23 | 24 | ||
24 | #include <asm/bootinfo.h> | 25 | #include <asm/bootinfo.h> |
25 | #include <asm/branch.h> | 26 | #include <asm/branch.h> |
26 | #include <asm/break.h> | 27 | #include <asm/break.h> |
27 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
29 | #include <asm/dsp.h> | ||
28 | #include <asm/fpu.h> | 30 | #include <asm/fpu.h> |
31 | #include <asm/mipsregs.h> | ||
32 | #include <asm/mipsmtregs.h> | ||
29 | #include <asm/module.h> | 33 | #include <asm/module.h> |
30 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
31 | #include <asm/ptrace.h> | 35 | #include <asm/ptrace.h> |
@@ -54,14 +58,19 @@ extern asmlinkage void handle_tr(void); | |||
54 | extern asmlinkage void handle_fpe(void); | 58 | extern asmlinkage void handle_fpe(void); |
55 | extern asmlinkage void handle_mdmx(void); | 59 | extern asmlinkage void handle_mdmx(void); |
56 | extern asmlinkage void handle_watch(void); | 60 | extern asmlinkage void handle_watch(void); |
61 | extern asmlinkage void handle_mt(void); | ||
62 | extern asmlinkage void handle_dsp(void); | ||
57 | extern asmlinkage void handle_mcheck(void); | 63 | extern asmlinkage void handle_mcheck(void); |
58 | extern asmlinkage void handle_reserved(void); | 64 | extern asmlinkage void handle_reserved(void); |
59 | 65 | ||
60 | extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp, | 66 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
61 | struct mips_fpu_soft_struct *ctx); | 67 | struct mips_fpu_soft_struct *ctx); |
62 | 68 | ||
63 | void (*board_be_init)(void); | 69 | void (*board_be_init)(void); |
64 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 70 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
71 | void (*board_nmi_handler_setup)(void); | ||
72 | void (*board_ejtag_handler_setup)(void); | ||
73 | void (*board_bind_eic_interrupt)(int irq, int regset); | ||
65 | 74 | ||
66 | /* | 75 | /* |
67 | * These constant is for searching for possible module text segments. | 76 | * These constant is for searching for possible module text segments. |
@@ -201,32 +210,47 @@ void show_regs(struct pt_regs *regs) | |||
201 | 210 | ||
202 | printk("Status: %08x ", (uint32_t) regs->cp0_status); | 211 | printk("Status: %08x ", (uint32_t) regs->cp0_status); |
203 | 212 | ||
204 | if (regs->cp0_status & ST0_KX) | 213 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { |
205 | printk("KX "); | 214 | if (regs->cp0_status & ST0_KUO) |
206 | if (regs->cp0_status & ST0_SX) | 215 | printk("KUo "); |
207 | printk("SX "); | 216 | if (regs->cp0_status & ST0_IEO) |
208 | if (regs->cp0_status & ST0_UX) | 217 | printk("IEo "); |
209 | printk("UX "); | 218 | if (regs->cp0_status & ST0_KUP) |
210 | switch (regs->cp0_status & ST0_KSU) { | 219 | printk("KUp "); |
211 | case KSU_USER: | 220 | if (regs->cp0_status & ST0_IEP) |
212 | printk("USER "); | 221 | printk("IEp "); |
213 | break; | 222 | if (regs->cp0_status & ST0_KUC) |
214 | case KSU_SUPERVISOR: | 223 | printk("KUc "); |
215 | printk("SUPERVISOR "); | 224 | if (regs->cp0_status & ST0_IEC) |
216 | break; | 225 | printk("IEc "); |
217 | case KSU_KERNEL: | 226 | } else { |
218 | printk("KERNEL "); | 227 | if (regs->cp0_status & ST0_KX) |
219 | break; | 228 | printk("KX "); |
220 | default: | 229 | if (regs->cp0_status & ST0_SX) |
221 | printk("BAD_MODE "); | 230 | printk("SX "); |
222 | break; | 231 | if (regs->cp0_status & ST0_UX) |
232 | printk("UX "); | ||
233 | switch (regs->cp0_status & ST0_KSU) { | ||
234 | case KSU_USER: | ||
235 | printk("USER "); | ||
236 | break; | ||
237 | case KSU_SUPERVISOR: | ||
238 | printk("SUPERVISOR "); | ||
239 | break; | ||
240 | case KSU_KERNEL: | ||
241 | printk("KERNEL "); | ||
242 | break; | ||
243 | default: | ||
244 | printk("BAD_MODE "); | ||
245 | break; | ||
246 | } | ||
247 | if (regs->cp0_status & ST0_ERL) | ||
248 | printk("ERL "); | ||
249 | if (regs->cp0_status & ST0_EXL) | ||
250 | printk("EXL "); | ||
251 | if (regs->cp0_status & ST0_IE) | ||
252 | printk("IE "); | ||
223 | } | 253 | } |
224 | if (regs->cp0_status & ST0_ERL) | ||
225 | printk("ERL "); | ||
226 | if (regs->cp0_status & ST0_EXL) | ||
227 | printk("EXL "); | ||
228 | if (regs->cp0_status & ST0_IE) | ||
229 | printk("IE "); | ||
230 | printk("\n"); | 254 | printk("\n"); |
231 | 255 | ||
232 | printk("Cause : %08x\n", cause); | 256 | printk("Cause : %08x\n", cause); |
@@ -252,29 +276,18 @@ void show_registers(struct pt_regs *regs) | |||
252 | 276 | ||
253 | static DEFINE_SPINLOCK(die_lock); | 277 | static DEFINE_SPINLOCK(die_lock); |
254 | 278 | ||
255 | NORET_TYPE void __die(const char * str, struct pt_regs * regs, | 279 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) |
256 | const char * file, const char * func, unsigned long line) | ||
257 | { | 280 | { |
258 | static int die_counter; | 281 | static int die_counter; |
259 | 282 | ||
260 | console_verbose(); | 283 | console_verbose(); |
261 | spin_lock_irq(&die_lock); | 284 | spin_lock_irq(&die_lock); |
262 | printk("%s", str); | 285 | printk("%s[#%d]:\n", str, ++die_counter); |
263 | if (file && func) | ||
264 | printk(" in %s:%s, line %ld", file, func, line); | ||
265 | printk("[#%d]:\n", ++die_counter); | ||
266 | show_registers(regs); | 286 | show_registers(regs); |
267 | spin_unlock_irq(&die_lock); | 287 | spin_unlock_irq(&die_lock); |
268 | do_exit(SIGSEGV); | 288 | do_exit(SIGSEGV); |
269 | } | 289 | } |
270 | 290 | ||
271 | void __die_if_kernel(const char * str, struct pt_regs * regs, | ||
272 | const char * file, const char * func, unsigned long line) | ||
273 | { | ||
274 | if (!user_mode(regs)) | ||
275 | __die(str, regs, file, func, line); | ||
276 | } | ||
277 | |||
278 | extern const struct exception_table_entry __start___dbe_table[]; | 291 | extern const struct exception_table_entry __start___dbe_table[]; |
279 | extern const struct exception_table_entry __stop___dbe_table[]; | 292 | extern const struct exception_table_entry __stop___dbe_table[]; |
280 | 293 | ||
@@ -339,9 +352,9 @@ asmlinkage void do_be(struct pt_regs *regs) | |||
339 | 352 | ||
340 | static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) | 353 | static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) |
341 | { | 354 | { |
342 | unsigned int *epc; | 355 | unsigned int __user *epc; |
343 | 356 | ||
344 | epc = (unsigned int *) regs->cp0_epc + | 357 | epc = (unsigned int __user *) regs->cp0_epc + |
345 | ((regs->cp0_cause & CAUSEF_BD) != 0); | 358 | ((regs->cp0_cause & CAUSEF_BD) != 0); |
346 | if (!get_user(*opcode, epc)) | 359 | if (!get_user(*opcode, epc)) |
347 | return 0; | 360 | return 0; |
@@ -360,6 +373,10 @@ static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) | |||
360 | #define OFFSET 0x0000ffff | 373 | #define OFFSET 0x0000ffff |
361 | #define LL 0xc0000000 | 374 | #define LL 0xc0000000 |
362 | #define SC 0xe0000000 | 375 | #define SC 0xe0000000 |
376 | #define SPEC3 0x7c000000 | ||
377 | #define RD 0x0000f800 | ||
378 | #define FUNC 0x0000003f | ||
379 | #define RDHWR 0x0000003b | ||
363 | 380 | ||
364 | /* | 381 | /* |
365 | * The ll_bit is cleared by r*_switch.S | 382 | * The ll_bit is cleared by r*_switch.S |
@@ -371,7 +388,7 @@ static struct task_struct *ll_task = NULL; | |||
371 | 388 | ||
372 | static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | 389 | static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) |
373 | { | 390 | { |
374 | unsigned long value, *vaddr; | 391 | unsigned long value, __user *vaddr; |
375 | long offset; | 392 | long offset; |
376 | int signal = 0; | 393 | int signal = 0; |
377 | 394 | ||
@@ -385,7 +402,8 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | |||
385 | offset <<= 16; | 402 | offset <<= 16; |
386 | offset >>= 16; | 403 | offset >>= 16; |
387 | 404 | ||
388 | vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); | 405 | vaddr = (unsigned long __user *) |
406 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
389 | 407 | ||
390 | if ((unsigned long)vaddr & 3) { | 408 | if ((unsigned long)vaddr & 3) { |
391 | signal = SIGBUS; | 409 | signal = SIGBUS; |
@@ -407,9 +425,10 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | |||
407 | 425 | ||
408 | preempt_enable(); | 426 | preempt_enable(); |
409 | 427 | ||
428 | compute_return_epc(regs); | ||
429 | |||
410 | regs->regs[(opcode & RT) >> 16] = value; | 430 | regs->regs[(opcode & RT) >> 16] = value; |
411 | 431 | ||
412 | compute_return_epc(regs); | ||
413 | return; | 432 | return; |
414 | 433 | ||
415 | sig: | 434 | sig: |
@@ -418,7 +437,8 @@ sig: | |||
418 | 437 | ||
419 | static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | 438 | static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) |
420 | { | 439 | { |
421 | unsigned long *vaddr, reg; | 440 | unsigned long __user *vaddr; |
441 | unsigned long reg; | ||
422 | long offset; | 442 | long offset; |
423 | int signal = 0; | 443 | int signal = 0; |
424 | 444 | ||
@@ -432,7 +452,8 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
432 | offset <<= 16; | 452 | offset <<= 16; |
433 | offset >>= 16; | 453 | offset >>= 16; |
434 | 454 | ||
435 | vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); | 455 | vaddr = (unsigned long __user *) |
456 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
436 | reg = (opcode & RT) >> 16; | 457 | reg = (opcode & RT) >> 16; |
437 | 458 | ||
438 | if ((unsigned long)vaddr & 3) { | 459 | if ((unsigned long)vaddr & 3) { |
@@ -443,9 +464,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
443 | preempt_disable(); | 464 | preempt_disable(); |
444 | 465 | ||
445 | if (ll_bit == 0 || ll_task != current) { | 466 | if (ll_bit == 0 || ll_task != current) { |
467 | compute_return_epc(regs); | ||
446 | regs->regs[reg] = 0; | 468 | regs->regs[reg] = 0; |
447 | preempt_enable(); | 469 | preempt_enable(); |
448 | compute_return_epc(regs); | ||
449 | return; | 470 | return; |
450 | } | 471 | } |
451 | 472 | ||
@@ -456,9 +477,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
456 | goto sig; | 477 | goto sig; |
457 | } | 478 | } |
458 | 479 | ||
480 | compute_return_epc(regs); | ||
459 | regs->regs[reg] = 1; | 481 | regs->regs[reg] = 1; |
460 | 482 | ||
461 | compute_return_epc(regs); | ||
462 | return; | 483 | return; |
463 | 484 | ||
464 | sig: | 485 | sig: |
@@ -491,6 +512,37 @@ static inline int simulate_llsc(struct pt_regs *regs) | |||
491 | return -EFAULT; /* Strange things going on ... */ | 512 | return -EFAULT; /* Strange things going on ... */ |
492 | } | 513 | } |
493 | 514 | ||
515 | /* | ||
516 | * Simulate trapping 'rdhwr' instructions to provide user accessible | ||
517 | * registers not implemented in hardware. The only current use of this | ||
518 | * is the thread area pointer. | ||
519 | */ | ||
520 | static inline int simulate_rdhwr(struct pt_regs *regs) | ||
521 | { | ||
522 | struct thread_info *ti = current->thread_info; | ||
523 | unsigned int opcode; | ||
524 | |||
525 | if (unlikely(get_insn_opcode(regs, &opcode))) | ||
526 | return -EFAULT; | ||
527 | |||
528 | if (unlikely(compute_return_epc(regs))) | ||
529 | return -EFAULT; | ||
530 | |||
531 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { | ||
532 | int rd = (opcode & RD) >> 11; | ||
533 | int rt = (opcode & RT) >> 16; | ||
534 | switch (rd) { | ||
535 | case 29: | ||
536 | regs->regs[rt] = ti->tp_value; | ||
537 | break; | ||
538 | default: | ||
539 | return -EFAULT; | ||
540 | } | ||
541 | } | ||
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
494 | asmlinkage void do_ov(struct pt_regs *regs) | 546 | asmlinkage void do_ov(struct pt_regs *regs) |
495 | { | 547 | { |
496 | siginfo_t info; | 548 | siginfo_t info; |
@@ -498,7 +550,7 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
498 | info.si_code = FPE_INTOVF; | 550 | info.si_code = FPE_INTOVF; |
499 | info.si_signo = SIGFPE; | 551 | info.si_signo = SIGFPE; |
500 | info.si_errno = 0; | 552 | info.si_errno = 0; |
501 | info.si_addr = (void *)regs->cp0_epc; | 553 | info.si_addr = (void __user *) regs->cp0_epc; |
502 | force_sig_info(SIGFPE, &info, current); | 554 | force_sig_info(SIGFPE, &info, current); |
503 | } | 555 | } |
504 | 556 | ||
@@ -512,6 +564,14 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
512 | 564 | ||
513 | preempt_disable(); | 565 | preempt_disable(); |
514 | 566 | ||
567 | #ifdef CONFIG_PREEMPT | ||
568 | if (!is_fpu_owner()) { | ||
569 | /* We might lose fpu before disabling preempt... */ | ||
570 | own_fpu(); | ||
571 | BUG_ON(!used_math()); | ||
572 | restore_fp(current); | ||
573 | } | ||
574 | #endif | ||
515 | /* | 575 | /* |
516 | * Unimplemented operation exception. If we've got the full | 576 | * Unimplemented operation exception. If we've got the full |
517 | * software emulator on-board, let's use it... | 577 | * software emulator on-board, let's use it... |
@@ -523,11 +583,18 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
523 | * a bit extreme for what should be an infrequent event. | 583 | * a bit extreme for what should be an infrequent event. |
524 | */ | 584 | */ |
525 | save_fp(current); | 585 | save_fp(current); |
586 | /* Ensure 'resume' not overwrite saved fp context again. */ | ||
587 | lose_fpu(); | ||
588 | |||
589 | preempt_enable(); | ||
526 | 590 | ||
527 | /* Run the emulator */ | 591 | /* Run the emulator */ |
528 | sig = fpu_emulator_cop1Handler (0, regs, | 592 | sig = fpu_emulator_cop1Handler (regs, |
529 | ¤t->thread.fpu.soft); | 593 | ¤t->thread.fpu.soft); |
530 | 594 | ||
595 | preempt_disable(); | ||
596 | |||
597 | own_fpu(); /* Using the FPU again. */ | ||
531 | /* | 598 | /* |
532 | * We can't allow the emulated instruction to leave any of | 599 | * We can't allow the emulated instruction to leave any of |
533 | * the cause bit set in $fcr31. | 600 | * the cause bit set in $fcr31. |
@@ -584,7 +651,7 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
584 | info.si_code = FPE_INTOVF; | 651 | info.si_code = FPE_INTOVF; |
585 | info.si_signo = SIGFPE; | 652 | info.si_signo = SIGFPE; |
586 | info.si_errno = 0; | 653 | info.si_errno = 0; |
587 | info.si_addr = (void *)regs->cp0_epc; | 654 | info.si_addr = (void __user *) regs->cp0_epc; |
588 | force_sig_info(SIGFPE, &info, current); | 655 | force_sig_info(SIGFPE, &info, current); |
589 | break; | 656 | break; |
590 | default: | 657 | default: |
@@ -621,7 +688,7 @@ asmlinkage void do_tr(struct pt_regs *regs) | |||
621 | info.si_code = FPE_INTOVF; | 688 | info.si_code = FPE_INTOVF; |
622 | info.si_signo = SIGFPE; | 689 | info.si_signo = SIGFPE; |
623 | info.si_errno = 0; | 690 | info.si_errno = 0; |
624 | info.si_addr = (void *)regs->cp0_epc; | 691 | info.si_addr = (void __user *) regs->cp0_epc; |
625 | force_sig_info(SIGFPE, &info, current); | 692 | force_sig_info(SIGFPE, &info, current); |
626 | break; | 693 | break; |
627 | default: | 694 | default: |
@@ -637,6 +704,9 @@ asmlinkage void do_ri(struct pt_regs *regs) | |||
637 | if (!simulate_llsc(regs)) | 704 | if (!simulate_llsc(regs)) |
638 | return; | 705 | return; |
639 | 706 | ||
707 | if (!simulate_rdhwr(regs)) | ||
708 | return; | ||
709 | |||
640 | force_sig(SIGILL, current); | 710 | force_sig(SIGILL, current); |
641 | } | 711 | } |
642 | 712 | ||
@@ -650,11 +720,13 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
650 | 720 | ||
651 | switch (cpid) { | 721 | switch (cpid) { |
652 | case 0: | 722 | case 0: |
653 | if (cpu_has_llsc) | 723 | if (!cpu_has_llsc) |
654 | break; | 724 | if (!simulate_llsc(regs)) |
725 | return; | ||
655 | 726 | ||
656 | if (!simulate_llsc(regs)) | 727 | if (!simulate_rdhwr(regs)) |
657 | return; | 728 | return; |
729 | |||
658 | break; | 730 | break; |
659 | 731 | ||
660 | case 1: | 732 | case 1: |
@@ -668,15 +740,15 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
668 | set_used_math(); | 740 | set_used_math(); |
669 | } | 741 | } |
670 | 742 | ||
743 | preempt_enable(); | ||
744 | |||
671 | if (!cpu_has_fpu) { | 745 | if (!cpu_has_fpu) { |
672 | int sig = fpu_emulator_cop1Handler(0, regs, | 746 | int sig = fpu_emulator_cop1Handler(regs, |
673 | ¤t->thread.fpu.soft); | 747 | ¤t->thread.fpu.soft); |
674 | if (sig) | 748 | if (sig) |
675 | force_sig(sig, current); | 749 | force_sig(sig, current); |
676 | } | 750 | } |
677 | 751 | ||
678 | preempt_enable(); | ||
679 | |||
680 | return; | 752 | return; |
681 | 753 | ||
682 | case 2: | 754 | case 2: |
@@ -716,6 +788,22 @@ asmlinkage void do_mcheck(struct pt_regs *regs) | |||
716 | (regs->cp0_status & ST0_TS) ? "" : "not "); | 788 | (regs->cp0_status & ST0_TS) ? "" : "not "); |
717 | } | 789 | } |
718 | 790 | ||
791 | asmlinkage void do_mt(struct pt_regs *regs) | ||
792 | { | ||
793 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
794 | |||
795 | force_sig(SIGILL, current); | ||
796 | } | ||
797 | |||
798 | |||
799 | asmlinkage void do_dsp(struct pt_regs *regs) | ||
800 | { | ||
801 | if (cpu_has_dsp) | ||
802 | panic("Unexpected DSP exception\n"); | ||
803 | |||
804 | force_sig(SIGILL, current); | ||
805 | } | ||
806 | |||
719 | asmlinkage void do_reserved(struct pt_regs *regs) | 807 | asmlinkage void do_reserved(struct pt_regs *regs) |
720 | { | 808 | { |
721 | /* | 809 | /* |
@@ -728,6 +816,12 @@ asmlinkage void do_reserved(struct pt_regs *regs) | |||
728 | (regs->cp0_cause & 0x7f) >> 2); | 816 | (regs->cp0_cause & 0x7f) >> 2); |
729 | } | 817 | } |
730 | 818 | ||
819 | asmlinkage void do_default_vi(struct pt_regs *regs) | ||
820 | { | ||
821 | show_regs(regs); | ||
822 | panic("Caught unexpected vectored interrupt."); | ||
823 | } | ||
824 | |||
731 | /* | 825 | /* |
732 | * Some MIPS CPUs can enable/disable for cache parity detection, but do | 826 | * Some MIPS CPUs can enable/disable for cache parity detection, but do |
733 | * it different ways. | 827 | * it different ways. |
@@ -736,16 +830,12 @@ static inline void parity_protection_init(void) | |||
736 | { | 830 | { |
737 | switch (current_cpu_data.cputype) { | 831 | switch (current_cpu_data.cputype) { |
738 | case CPU_24K: | 832 | case CPU_24K: |
739 | /* 24K cache parity not currently implemented in FPGA */ | ||
740 | printk(KERN_INFO "Disable cache parity protection for " | ||
741 | "MIPS 24K CPU.\n"); | ||
742 | write_c0_ecc(read_c0_ecc() & ~0x80000000); | ||
743 | break; | ||
744 | case CPU_5KC: | 833 | case CPU_5KC: |
745 | /* Set the PE bit (bit 31) in the c0_ecc register. */ | 834 | write_c0_ecc(0x80000000); |
746 | printk(KERN_INFO "Enable cache parity protection for " | 835 | back_to_back_c0_hazard(); |
747 | "MIPS 5KC/24K CPUs.\n"); | 836 | /* Set the PE bit (bit 31) in the c0_errctl register. */ |
748 | write_c0_ecc(read_c0_ecc() | 0x80000000); | 837 | printk(KERN_INFO "Cache parity protection %sabled\n", |
838 | (read_c0_ecc() & 0x80000000) ? "en" : "dis"); | ||
749 | break; | 839 | break; |
750 | case CPU_20KC: | 840 | case CPU_20KC: |
751 | case CPU_25KF: | 841 | case CPU_25KF: |
@@ -783,7 +873,7 @@ asmlinkage void cache_parity_error(void) | |||
783 | reg_val & (1<<22) ? "E0 " : ""); | 873 | reg_val & (1<<22) ? "E0 " : ""); |
784 | printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); | 874 | printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); |
785 | 875 | ||
786 | #if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64) | 876 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) |
787 | if (reg_val & (1<<22)) | 877 | if (reg_val & (1<<22)) |
788 | printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); | 878 | printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); |
789 | 879 | ||
@@ -840,7 +930,11 @@ void nmi_exception_handler(struct pt_regs *regs) | |||
840 | while(1) ; | 930 | while(1) ; |
841 | } | 931 | } |
842 | 932 | ||
933 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
934 | |||
935 | unsigned long ebase; | ||
843 | unsigned long exception_handlers[32]; | 936 | unsigned long exception_handlers[32]; |
937 | unsigned long vi_handlers[64]; | ||
844 | 938 | ||
845 | /* | 939 | /* |
846 | * As a side effect of the way this is implemented we're limited | 940 | * As a side effect of the way this is implemented we're limited |
@@ -854,13 +948,156 @@ void *set_except_vector(int n, void *addr) | |||
854 | 948 | ||
855 | exception_handlers[n] = handler; | 949 | exception_handlers[n] = handler; |
856 | if (n == 0 && cpu_has_divec) { | 950 | if (n == 0 && cpu_has_divec) { |
857 | *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 | | 951 | *(volatile u32 *)(ebase + 0x200) = 0x08000000 | |
858 | (0x03ffffff & (handler >> 2)); | 952 | (0x03ffffff & (handler >> 2)); |
859 | flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204); | 953 | flush_icache_range(ebase + 0x200, ebase + 0x204); |
860 | } | 954 | } |
861 | return (void *)old_handler; | 955 | return (void *)old_handler; |
862 | } | 956 | } |
863 | 957 | ||
958 | #ifdef CONFIG_CPU_MIPSR2 | ||
959 | /* | ||
960 | * Shadow register allocation | ||
961 | * FIXME: SMP... | ||
962 | */ | ||
963 | |||
964 | /* MIPSR2 shadow register sets */ | ||
965 | struct shadow_registers { | ||
966 | spinlock_t sr_lock; /* */ | ||
967 | int sr_supported; /* Number of shadow register sets supported */ | ||
968 | int sr_allocated; /* Bitmap of allocated shadow registers */ | ||
969 | } shadow_registers; | ||
970 | |||
971 | void mips_srs_init(void) | ||
972 | { | ||
973 | #ifdef CONFIG_CPU_MIPSR2_SRS | ||
974 | shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | ||
975 | printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported); | ||
976 | #else | ||
977 | shadow_registers.sr_supported = 1; | ||
978 | #endif | ||
979 | shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ | ||
980 | spin_lock_init(&shadow_registers.sr_lock); | ||
981 | } | ||
982 | |||
983 | int mips_srs_max(void) | ||
984 | { | ||
985 | return shadow_registers.sr_supported; | ||
986 | } | ||
987 | |||
988 | int mips_srs_alloc (void) | ||
989 | { | ||
990 | struct shadow_registers *sr = &shadow_registers; | ||
991 | unsigned long flags; | ||
992 | int set; | ||
993 | |||
994 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
995 | |||
996 | for (set = 0; set < sr->sr_supported; set++) { | ||
997 | if ((sr->sr_allocated & (1 << set)) == 0) { | ||
998 | sr->sr_allocated |= 1 << set; | ||
999 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1000 | return set; | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | /* None available */ | ||
1005 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1006 | return -1; | ||
1007 | } | ||
1008 | |||
1009 | void mips_srs_free (int set) | ||
1010 | { | ||
1011 | struct shadow_registers *sr = &shadow_registers; | ||
1012 | unsigned long flags; | ||
1013 | |||
1014 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
1015 | sr->sr_allocated &= ~(1 << set); | ||
1016 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1017 | } | ||
1018 | |||
1019 | void *set_vi_srs_handler (int n, void *addr, int srs) | ||
1020 | { | ||
1021 | unsigned long handler; | ||
1022 | unsigned long old_handler = vi_handlers[n]; | ||
1023 | u32 *w; | ||
1024 | unsigned char *b; | ||
1025 | |||
1026 | if (!cpu_has_veic && !cpu_has_vint) | ||
1027 | BUG(); | ||
1028 | |||
1029 | if (addr == NULL) { | ||
1030 | handler = (unsigned long) do_default_vi; | ||
1031 | srs = 0; | ||
1032 | } | ||
1033 | else | ||
1034 | handler = (unsigned long) addr; | ||
1035 | vi_handlers[n] = (unsigned long) addr; | ||
1036 | |||
1037 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); | ||
1038 | |||
1039 | if (srs >= mips_srs_max()) | ||
1040 | panic("Shadow register set %d not supported", srs); | ||
1041 | |||
1042 | if (cpu_has_veic) { | ||
1043 | if (board_bind_eic_interrupt) | ||
1044 | board_bind_eic_interrupt (n, srs); | ||
1045 | } | ||
1046 | else if (cpu_has_vint) { | ||
1047 | /* SRSMap is only defined if shadow sets are implemented */ | ||
1048 | if (mips_srs_max() > 1) | ||
1049 | change_c0_srsmap (0xf << n*4, srs << n*4); | ||
1050 | } | ||
1051 | |||
1052 | if (srs == 0) { | ||
1053 | /* | ||
1054 | * If no shadow set is selected then use the default handler | ||
1055 | * that does normal register saving and a standard interrupt exit | ||
1056 | */ | ||
1057 | |||
1058 | extern char except_vec_vi, except_vec_vi_lui; | ||
1059 | extern char except_vec_vi_ori, except_vec_vi_end; | ||
1060 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | ||
1061 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | ||
1062 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | ||
1063 | |||
1064 | if (handler_len > VECTORSPACING) { | ||
1065 | /* | ||
1066 | * Sigh... panicing won't help as the console | ||
1067 | * is probably not configured :( | ||
1068 | */ | ||
1069 | panic ("VECTORSPACING too small"); | ||
1070 | } | ||
1071 | |||
1072 | memcpy (b, &except_vec_vi, handler_len); | ||
1073 | w = (u32 *)(b + lui_offset); | ||
1074 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | ||
1075 | w = (u32 *)(b + ori_offset); | ||
1076 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); | ||
1077 | flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); | ||
1078 | } | ||
1079 | else { | ||
1080 | /* | ||
1081 | * In other cases jump directly to the interrupt handler | ||
1082 | * | ||
1083 | * It is the handlers responsibility to save registers if required | ||
1084 | * (eg hi/lo) and return from the exception using "eret" | ||
1085 | */ | ||
1086 | w = (u32 *)b; | ||
1087 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ | ||
1088 | *w = 0; | ||
1089 | flush_icache_range((unsigned long)b, (unsigned long)(b+8)); | ||
1090 | } | ||
1091 | |||
1092 | return (void *)old_handler; | ||
1093 | } | ||
1094 | |||
1095 | void *set_vi_handler (int n, void *addr) | ||
1096 | { | ||
1097 | return set_vi_srs_handler (n, addr, 0); | ||
1098 | } | ||
1099 | #endif | ||
1100 | |||
864 | /* | 1101 | /* |
865 | * This is used by native signal handling | 1102 | * This is used by native signal handling |
866 | */ | 1103 | */ |
@@ -912,6 +1149,7 @@ static inline void signal32_init(void) | |||
912 | 1149 | ||
913 | extern void cpu_cache_init(void); | 1150 | extern void cpu_cache_init(void); |
914 | extern void tlb_init(void); | 1151 | extern void tlb_init(void); |
1152 | extern void flush_tlb_handlers(void); | ||
915 | 1153 | ||
916 | void __init per_cpu_trap_init(void) | 1154 | void __init per_cpu_trap_init(void) |
917 | { | 1155 | { |
@@ -929,15 +1167,32 @@ void __init per_cpu_trap_init(void) | |||
929 | #endif | 1167 | #endif |
930 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) | 1168 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) |
931 | status_set |= ST0_XX; | 1169 | status_set |= ST0_XX; |
932 | change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, | 1170 | change_c0_status(ST0_CU|ST0_MX|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
933 | status_set); | 1171 | status_set); |
934 | 1172 | ||
1173 | if (cpu_has_dsp) | ||
1174 | set_c0_status(ST0_MX); | ||
1175 | |||
1176 | #ifdef CONFIG_CPU_MIPSR2 | ||
1177 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | ||
1178 | #endif | ||
1179 | |||
935 | /* | 1180 | /* |
936 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1181 | * Interrupt handling. |
937 | * interrupt processing overhead. Use it where available. | ||
938 | */ | 1182 | */ |
939 | if (cpu_has_divec) | 1183 | if (cpu_has_veic || cpu_has_vint) { |
940 | set_c0_cause(CAUSEF_IV); | 1184 | write_c0_ebase (ebase); |
1185 | /* Setting vector spacing enables EI/VI mode */ | ||
1186 | change_c0_intctl (0x3e0, VECTORSPACING); | ||
1187 | } | ||
1188 | if (cpu_has_divec) { | ||
1189 | if (cpu_has_mipsmt) { | ||
1190 | unsigned int vpflags = dvpe(); | ||
1191 | set_c0_cause(CAUSEF_IV); | ||
1192 | evpe(vpflags); | ||
1193 | } else | ||
1194 | set_c0_cause(CAUSEF_IV); | ||
1195 | } | ||
941 | 1196 | ||
942 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1197 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
943 | TLBMISS_HANDLER_SETUP(); | 1198 | TLBMISS_HANDLER_SETUP(); |
@@ -951,13 +1206,41 @@ void __init per_cpu_trap_init(void) | |||
951 | tlb_init(); | 1206 | tlb_init(); |
952 | } | 1207 | } |
953 | 1208 | ||
1209 | /* Install CPU exception handler */ | ||
1210 | void __init set_handler (unsigned long offset, void *addr, unsigned long size) | ||
1211 | { | ||
1212 | memcpy((void *)(ebase + offset), addr, size); | ||
1213 | flush_icache_range(ebase + offset, ebase + offset + size); | ||
1214 | } | ||
1215 | |||
1216 | /* Install uncached CPU exception handler */ | ||
1217 | void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size) | ||
1218 | { | ||
1219 | #ifdef CONFIG_32BIT | ||
1220 | unsigned long uncached_ebase = KSEG1ADDR(ebase); | ||
1221 | #endif | ||
1222 | #ifdef CONFIG_64BIT | ||
1223 | unsigned long uncached_ebase = TO_UNCAC(ebase); | ||
1224 | #endif | ||
1225 | |||
1226 | memcpy((void *)(uncached_ebase + offset), addr, size); | ||
1227 | } | ||
1228 | |||
954 | void __init trap_init(void) | 1229 | void __init trap_init(void) |
955 | { | 1230 | { |
956 | extern char except_vec3_generic, except_vec3_r4000; | 1231 | extern char except_vec3_generic, except_vec3_r4000; |
957 | extern char except_vec_ejtag_debug; | ||
958 | extern char except_vec4; | 1232 | extern char except_vec4; |
959 | unsigned long i; | 1233 | unsigned long i; |
960 | 1234 | ||
1235 | if (cpu_has_veic || cpu_has_vint) | ||
1236 | ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64); | ||
1237 | else | ||
1238 | ebase = CAC_BASE; | ||
1239 | |||
1240 | #ifdef CONFIG_CPU_MIPSR2 | ||
1241 | mips_srs_init(); | ||
1242 | #endif | ||
1243 | |||
961 | per_cpu_trap_init(); | 1244 | per_cpu_trap_init(); |
962 | 1245 | ||
963 | /* | 1246 | /* |
@@ -965,7 +1248,7 @@ void __init trap_init(void) | |||
965 | * This will be overriden later as suitable for a particular | 1248 | * This will be overriden later as suitable for a particular |
966 | * configuration. | 1249 | * configuration. |
967 | */ | 1250 | */ |
968 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | 1251 | set_handler(0x180, &except_vec3_generic, 0x80); |
969 | 1252 | ||
970 | /* | 1253 | /* |
971 | * Setup default vectors | 1254 | * Setup default vectors |
@@ -977,8 +1260,8 @@ void __init trap_init(void) | |||
977 | * Copy the EJTAG debug exception vector handler code to its final | 1260 | * Copy the EJTAG debug exception vector handler code to its final |
978 | * destination. | 1261 | * destination. |
979 | */ | 1262 | */ |
980 | if (cpu_has_ejtag) | 1263 | if (cpu_has_ejtag && board_ejtag_handler_setup) |
981 | memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80); | 1264 | board_ejtag_handler_setup (); |
982 | 1265 | ||
983 | /* | 1266 | /* |
984 | * Only some CPUs have the watch exceptions. | 1267 | * Only some CPUs have the watch exceptions. |
@@ -987,11 +1270,15 @@ void __init trap_init(void) | |||
987 | set_except_vector(23, handle_watch); | 1270 | set_except_vector(23, handle_watch); |
988 | 1271 | ||
989 | /* | 1272 | /* |
990 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1273 | * Initialise interrupt handlers |
991 | * interrupt processing overhead. Use it where available. | ||
992 | */ | 1274 | */ |
993 | if (cpu_has_divec) | 1275 | if (cpu_has_veic || cpu_has_vint) { |
994 | memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8); | 1276 | int nvec = cpu_has_veic ? 64 : 8; |
1277 | for (i = 0; i < nvec; i++) | ||
1278 | set_vi_handler (i, NULL); | ||
1279 | } | ||
1280 | else if (cpu_has_divec) | ||
1281 | set_handler(0x200, &except_vec4, 0x8); | ||
995 | 1282 | ||
996 | /* | 1283 | /* |
997 | * Some CPUs can enable/disable for cache parity detection, but does | 1284 | * Some CPUs can enable/disable for cache parity detection, but does |
@@ -1023,21 +1310,6 @@ void __init trap_init(void) | |||
1023 | set_except_vector(11, handle_cpu); | 1310 | set_except_vector(11, handle_cpu); |
1024 | set_except_vector(12, handle_ov); | 1311 | set_except_vector(12, handle_ov); |
1025 | set_except_vector(13, handle_tr); | 1312 | set_except_vector(13, handle_tr); |
1026 | set_except_vector(22, handle_mdmx); | ||
1027 | |||
1028 | if (cpu_has_fpu && !cpu_has_nofpuex) | ||
1029 | set_except_vector(15, handle_fpe); | ||
1030 | |||
1031 | if (cpu_has_mcheck) | ||
1032 | set_except_vector(24, handle_mcheck); | ||
1033 | |||
1034 | if (cpu_has_vce) | ||
1035 | /* Special exception: R4[04]00 uses also the divec space. */ | ||
1036 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100); | ||
1037 | else if (cpu_has_4kex) | ||
1038 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | ||
1039 | else | ||
1040 | memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80); | ||
1041 | 1313 | ||
1042 | if (current_cpu_data.cputype == CPU_R6000 || | 1314 | if (current_cpu_data.cputype == CPU_R6000 || |
1043 | current_cpu_data.cputype == CPU_R6000A) { | 1315 | current_cpu_data.cputype == CPU_R6000A) { |
@@ -1053,10 +1325,37 @@ void __init trap_init(void) | |||
1053 | //set_except_vector(15, handle_ndc); | 1325 | //set_except_vector(15, handle_ndc); |
1054 | } | 1326 | } |
1055 | 1327 | ||
1328 | |||
1329 | if (board_nmi_handler_setup) | ||
1330 | board_nmi_handler_setup(); | ||
1331 | |||
1332 | if (cpu_has_fpu && !cpu_has_nofpuex) | ||
1333 | set_except_vector(15, handle_fpe); | ||
1334 | |||
1335 | set_except_vector(22, handle_mdmx); | ||
1336 | |||
1337 | if (cpu_has_mcheck) | ||
1338 | set_except_vector(24, handle_mcheck); | ||
1339 | |||
1340 | if (cpu_has_mipsmt) | ||
1341 | set_except_vector(25, handle_mt); | ||
1342 | |||
1343 | if (cpu_has_dsp) | ||
1344 | set_except_vector(26, handle_dsp); | ||
1345 | |||
1346 | if (cpu_has_vce) | ||
1347 | /* Special exception: R4[04]00 uses also the divec space. */ | ||
1348 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100); | ||
1349 | else if (cpu_has_4kex) | ||
1350 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | ||
1351 | else | ||
1352 | memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80); | ||
1353 | |||
1056 | signal_init(); | 1354 | signal_init(); |
1057 | #ifdef CONFIG_MIPS32_COMPAT | 1355 | #ifdef CONFIG_MIPS32_COMPAT |
1058 | signal32_init(); | 1356 | signal32_init(); |
1059 | #endif | 1357 | #endif |
1060 | 1358 | ||
1061 | flush_icache_range(CAC_BASE, CAC_BASE + 0x400); | 1359 | flush_icache_range(ebase, ebase + 0x400); |
1360 | flush_tlb_handlers(); | ||
1062 | } | 1361 | } |