author     Steven J. Hill <Steven.Hill@imgtec.com>   2013-03-25 13:15:55 -0400
committer  Ralf Baechle <ralf@linux-mips.org>        2013-05-09 11:55:18 -0400
commit     2a0b24f56c2492b932f1aed617ae80fb23500d21 (patch)
tree       c9aec2872f912c65b83a92a66fe94f6006427d73 /arch/mips/kernel
parent     102cedc32a6e3cd537374a3678d407591d5a6fab (diff)
MIPS: microMIPS: Add support for exception handling.
All exceptions must be taken in microMIPS mode, never in classic
MIPS mode, or the kernel falls apart. A few NOP instructions are
used to maintain the correct alignment of microMIPS versions of
the exception vectors.
Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/cpu-probe.c   |   3
-rw-r--r--  arch/mips/kernel/genex.S       |  73
-rw-r--r--  arch/mips/kernel/scall32-o32.S |   9
-rw-r--r--  arch/mips/kernel/smtc-asm.S    |   3
-rw-r--r--  arch/mips/kernel/traps.c       | 290
5 files changed, 277 insertions, 101 deletions
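Before the per-file hunks, a note on the mechanism the patch relies on: handlers built as microMIPS code are installed with bit 0 of their address set (the ISA mode bit), and set_handler() copies from addr - 1 so that the bytes placed at the vector are the real start of the handler. The following is a minimal standalone sketch of that address convention, not kernel code; the function and parameter names are made up for illustration.

```c
#include <stdint.h>
#include <string.h>

/*
 * Sketch of the microMIPS ISA-mode-bit convention assumed by the patch:
 * a pointer to microMIPS code has bit 0 set, so the byte address of the
 * actual instructions is (ptr & ~1). Hypothetical helper, not kernel code.
 */
static void install_vector(uint8_t *vector, const void *handler, size_t size)
{
    uintptr_t addr = (uintptr_t)handler;

    if (addr & 1)   /* microMIPS entry point: strip the ISA bit */
        memcpy(vector, (const void *)(addr - 1), size);
    else            /* classic MIPS entry point */
        memcpy(vector, (const void *)addr, size);
}
```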
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..4bbffdb9024f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
         c->options |= MIPS_CPU_ULRI;
     if (config3 & MIPS_CONF3_ISA)
         c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+    write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
     if (config3 & MIPS_CONF3_VZ)
         c->ases |= MIPS_ASE_VZ;
 
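The hunk above is what actually forces exception entry into microMIPS mode: when the kernel itself is built as microMIPS, Config3.ISAOnExc (MIPS_CONF3_ISA_OE) is set during CPU probing. Below is a hedged sketch of that read-modify-write in isolation; the accessors are the kernel's own (asm/mipsregs.h) and the bit position is an assumption repeated here only so the snippet is self-contained.

```c
/*
 * Illustrative only. MIPS_CONF3_ISA_OE is Config3.ISAOnExc; with the bit
 * set, the core fetches every exception vector in microMIPS mode. The
 * read_c0_config3()/write_c0_config3() accessors come from asm/mipsregs.h;
 * the bit value (bit 16) is an assumption, not taken from this patch.
 */
#define MIPS_CONF3_ISA_OE   (1 << 16)

static inline void enable_micromips_exception_mode(void)
{
    write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
}
```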
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5360b1db337d..5c2ba9f08a80 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
 
@@ -21,8 +21,10 @@
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
 #define PANIC_PIC(msg) \
     .set push; \
+    .set nomicromips; \
     .set reorder; \
     PTR_LA a0,8f; \
     .set noat; \
@@ -31,17 +33,10 @@
 9:  b 9b; \
     .set pop; \
     TEXT(msg)
+#endif
 
     __INIT
 
-NESTED(except_vec0_generic, 0, sp)
-    PANIC_PIC("Exception vector 0 called")
-    END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
-    PANIC_PIC("Exception vector 1 called")
-    END(except_vec1_generic)
-
 /*
  * General exception vector for all other CPUs.
  *
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
     nop
     nop
     nop
+#ifdef CONFIG_CPU_MICROMIPS
+    nop
+    nop
+    nop
+    nop
+#endif
     .set mips3
     wait
     /* end of rollback region (the region size must be power of two) */
-    .set pop
 1:
     jr ra
+    nop
+    .set pop
     END(r4k_wait)
 
     .macro BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
     LONG_L s0, TI_REGS($28)
     LONG_S sp, TI_REGS($28)
     PTR_LA ra, ret_from_irq
-    j plat_irq_dispatch
+    PTR_LA v0, plat_irq_dispatch
+    jr v0
+#ifdef CONFIG_CPU_MICROMIPS
+    nop
+#endif
     END(handle_int)
 
     __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
 /*
  * EJTAG debug exception handler.
  * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
  * unconditional jump to this vector.
  */
 NESTED(except_vec_ejtag_debug, 0, sp)
     j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+    nop
+#endif
     END(except_vec_ejtag_debug)
 
     __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
 FEXPORT(except_vec_vi_mori)
     ori a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
+    PTR_LA v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
     lui v0, 0       /* Patched */
-    j except_vec_vi_handler
+    jr v1
 FEXPORT(except_vec_vi_ori)
     ori v0, 0       /* Patched */
     .set pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
  */
 NESTED(except_vec_nmi, 0, sp)
     j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+    nop
+#endif
     END(except_vec_nmi)
 
     __FINIT
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
     .set push
     .set noat
     .set noreorder
-    /* 0x7c03e83b: rdhwr v1,$29 */
+    /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
+    /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
     MFC0 k1, CP0_EPC
-    lui k0, 0x7c03
-    lw k1, (k1)
-    ori k0, 0xe83b
-    .set reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+    and k0, k1, 1
+    beqz k0, 1f
+    xor k1, k0
+    lhu k0, (k1)
+    lhu k1, 2(k1)
+    ins k1, k0, 16, 16
+    lui k0, 0x007d
+    b docheck
+    ori k0, 0x6b3c
+1:
+    lui k0, 0x7c03
+    lw k1, (k1)
+    ori k0, 0xe83b
+#else
+    andi k0, k1, 1
+    bnez k0, handle_ri
+    lui k0, 0x7c03
+    lw k1, (k1)
+    ori k0, 0xe83b
+#endif
+    .set reorder
+docheck:
     bne k0, k1, handle_ri   /* if not ours */
+
+isrdhwr:
     /* The insn is rdhwr.  No need to check CAUSE.BD here. */
     get_saved_sp    /* k1 := current_thread_info */
     .set noreorder
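One detail of the genex.S change worth spelling out: in microMIPS mode the faulting instruction at EPC is only guaranteed to be halfword aligned, so the new rdhwr fast path loads it as two lhu's and stitches them together before comparing against the microMIPS encoding of rdhwr v1,$29 (0x007d6b3c), while the classic path still does a single lw against 0x7c03e83b. A rough C rendering of that check follows; it is a sketch only, with the constants taken from the comments in the hunk above.

```c
#include <stdint.h>
#include <stdbool.h>

#define MIPS32_RDHWR_V1_29  0x7c03e83bu  /* rdhwr v1,$29, classic encoding */
#define MM_RDHWR_V1_29      0x007d6b3cu  /* rdhwr v1,$29, microMIPS encoding */

/* Sketch: decide whether the trapping instruction is the TLS rdhwr. */
static bool is_tls_rdhwr(unsigned long epc)
{
    uint32_t insn;

    if (epc & 1) {   /* EPC bit 0 set: microMIPS mode */
        const uint16_t *p = (const uint16_t *)(epc & ~1UL);

        insn = ((uint32_t)p[0] << 16) | p[1];   /* two halfword loads */
        return insn == MM_RDHWR_V1_29;
    }
    insn = *(const uint32_t *)epc;              /* classic MIPS: one word load */
    return insn == MIPS32_RDHWR_V1_29;
}
```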
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea29649fc28..9b36424b03c5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@ stackargs:
 5:  jr t1
     sw t5, 16(sp)       # argument #5 to ksp
 
+#ifdef CONFIG_CPU_MICROMIPS
     sw t8, 28(sp)       # argument #8 to ksp
+    nop
     sw t7, 24(sp)       # argument #7 to ksp
+    nop
     sw t6, 20(sp)       # argument #6 to ksp
+    nop
+#else
+    sw t8, 28(sp)       # argument #8 to ksp
+    sw t7, 24(sp)       # argument #7 to ksp
+    sw t6, 20(sp)       # argument #6 to ksp
+#endif
 6:  j stack_done        # go back
     nop
     .set pop
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac0a9c8..2866863a39df 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
     .text
     .align 5
 FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+    nop
+#endif
     .set noat
     /* Disable thread scheduling to make Status update atomic */
     DMT 27                  # dmt k1
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 333782b83164..571a69c57d82 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-    struct mips_fpu_struct *ctx, int has_fpu,
-    void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR 0x00006b3c
+#define MM_RS 0x001f0000
+#define MM_RT 0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
     struct thread_info *ti = task_thread_info(current);
 
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+            1, regs, 0);
+    switch (rd) {
+    case 0:     /* CPU number */
+        regs->regs[rt] = smp_processor_id();
+        return 0;
+    case 1:     /* SYNCI length */
+        regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+                     current_cpu_data.icache.linesz);
+        return 0;
+    case 2:     /* Read count register */
+        regs->regs[rt] = read_c0_count();
+        return 0;
+    case 3:     /* Count register resolution */
+        switch (current_cpu_data.cputype) {
+        case CPU_20KC:
+        case CPU_25KF:
+            regs->regs[rt] = 1;
+            break;
+        default:
+            regs->regs[rt] = 2;
+        }
+        return 0;
+    case 29:
+        regs->regs[rt] = ti->tp_value;
+        return 0;
+    default:
+        return -1;
+    }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
     if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
         int rd = (opcode & RD) >> 11;
         int rt = (opcode & RT) >> 16;
-        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                1, regs, 0);
-        switch (rd) {
-        case 0:     /* CPU number */
-            regs->regs[rt] = smp_processor_id();
-            return 0;
-        case 1:     /* SYNCI length */
-            regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-                         current_cpu_data.icache.linesz);
-            return 0;
-        case 2:     /* Read count register */
-            regs->regs[rt] = read_c0_count();
-            return 0;
-        case 3:     /* Count register resolution */
-            switch (current_cpu_data.cputype) {
-            case CPU_20KC:
-            case CPU_25KF:
-                regs->regs[rt] = 1;
-                break;
-            default:
-                regs->regs[rt] = 2;
-            }
-            return 0;
-        case 29:
-            regs->regs[rt] = ti->tp_value;
-            return 0;
-        default:
-            return -1;
-        }
+
+        simulate_rdhwr(regs, rd, rt);
+        return 0;
+    }
+
+    /* Not ours. */
+    return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+    if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+        int rd = (opcode & MM_RS) >> 16;
+        int rt = (opcode & MM_RT) >> 21;
+        simulate_rdhwr(regs, rd, rt);
+        return 0;
     }
 
     /* Not ours. */
@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
     unsigned int opcode, bcode;
-
-    if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-        goto out_sigsegv;
+    unsigned long epc;
+    u16 instr[2];
+
+    if (get_isa16_mode(regs->cp0_epc)) {
+        /* Calculate EPC. */
+        epc = exception_epc(regs);
+        if (cpu_has_mmips) {
+            if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+                (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+                goto out_sigsegv;
+            opcode = (instr[0] << 16) | instr[1];
+        } else {
+            /* MIPS16e mode */
+            if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                goto out_sigsegv;
+            bcode = (instr[0] >> 6) & 0x3f;
+            do_trap_or_bp(regs, bcode, "Break");
+            return;
+        }
+    } else {
+        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+            goto out_sigsegv;
+    }
 
     /*
      * There is the ancient bug in the MIPS assemblers that the break
@@ -869,13 +911,22 @@ out_sigsegv:
 asmlinkage void do_tr(struct pt_regs *regs)
 {
     unsigned int opcode, tcode = 0;
+    u16 instr[2];
+    unsigned long epc = exception_epc(regs);
 
-    if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-        goto out_sigsegv;
+    if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+        (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+        goto out_sigsegv;
+    opcode = (instr[0] << 16) | instr[1];
 
     /* Immediate versions don't provide a code. */
-    if (!(opcode & OPCODE))
-        tcode = ((opcode >> 6) & ((1 << 10) - 1));
+    if (!(opcode & OPCODE)) {
+        if (get_isa16_mode(regs->cp0_epc))
+            /* microMIPS */
+            tcode = (opcode >> 12) & 0x1f;
+        else
+            tcode = ((opcode >> 6) & ((1 << 10) - 1));
+    }
 
     do_trap_or_bp(regs, tcode, "Trap");
     return;
@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 {
     unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
     unsigned long old_epc = regs->cp0_epc;
+    unsigned long old31 = regs->regs[31];
     unsigned int opcode = 0;
     int status = -1;
 
@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
     if (unlikely(compute_return_epc(regs) < 0))
         return;
 
-    if (unlikely(get_user(opcode, epc) < 0))
-        status = SIGSEGV;
-
-    if (!cpu_has_llsc && status < 0)
-        status = simulate_llsc(regs, opcode);
-
-    if (status < 0)
-        status = simulate_rdhwr(regs, opcode);
-
-    if (status < 0)
-        status = simulate_sync(regs, opcode);
-
+    if (get_isa16_mode(regs->cp0_epc)) {
+        unsigned short mmop[2] = { 0 };
+
+        if (unlikely(get_user(mmop[0], epc) < 0))
+            status = SIGSEGV;
+        if (unlikely(get_user(mmop[1], epc) < 0))
+            status = SIGSEGV;
+        opcode = (mmop[0] << 16) | mmop[1];
+
+        if (status < 0)
+            status = simulate_rdhwr_mm(regs, opcode);
+    } else {
+        if (unlikely(get_user(opcode, epc) < 0))
+            status = SIGSEGV;
+
+        if (!cpu_has_llsc && status < 0)
+            status = simulate_llsc(regs, opcode);
+
+        if (status < 0)
+            status = simulate_rdhwr_normal(regs, opcode);
+
+        if (status < 0)
+            status = simulate_sync(regs, opcode);
+    }
+
     if (status < 0)
         status = SIGILL;
 
     if (unlikely(status > 0)) {
         regs->cp0_epc = old_epc;    /* Undo skip-over. */
+        regs->regs[31] = old31;
         force_sig(status, current);
     }
 }
@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
     unsigned int __user *epc;
-    unsigned long old_epc;
+    unsigned long old_epc, old31;
     unsigned int opcode;
     unsigned int cpid;
     int status;
@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
     case 0:
         epc = (unsigned int __user *)exception_epc(regs);
         old_epc = regs->cp0_epc;
+        old31 = regs->regs[31];
         opcode = 0;
         status = -1;
 
         if (unlikely(compute_return_epc(regs) < 0))
             return;
 
-        if (unlikely(get_user(opcode, epc) < 0))
-            status = SIGSEGV;
-
-        if (!cpu_has_llsc && status < 0)
-            status = simulate_llsc(regs, opcode);
-
-        if (status < 0)
-            status = simulate_rdhwr(regs, opcode);
+        if (get_isa16_mode(regs->cp0_epc)) {
+            unsigned short mmop[2] = { 0 };
+
+            if (unlikely(get_user(mmop[0], epc) < 0))
+                status = SIGSEGV;
+            if (unlikely(get_user(mmop[1], epc) < 0))
+                status = SIGSEGV;
+            opcode = (mmop[0] << 16) | mmop[1];
+
+            if (status < 0)
+                status = simulate_rdhwr_mm(regs, opcode);
+        } else {
+            if (unlikely(get_user(opcode, epc) < 0))
+                status = SIGSEGV;
+
+            if (!cpu_has_llsc && status < 0)
+                status = simulate_llsc(regs, opcode);
+
+            if (status < 0)
+                status = simulate_rdhwr_normal(regs, opcode);
+        }
 
         if (status < 0)
             status = SIGILL;
 
         if (unlikely(status > 0)) {
             regs->cp0_epc = old_epc;    /* Undo skip-over. */
+            regs->regs[31] = old31;
             force_sig(status, current);
         }
 
@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void)
 void ejtag_exception_handler(struct pt_regs *regs)
 {
     const int field = 2 * sizeof(unsigned long);
-    unsigned long depc, old_epc;
+    unsigned long depc, old_epc, old_ra;
     unsigned int debug;
 
     printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
      * calculation.
      */
     old_epc = regs->cp0_epc;
+    old_ra = regs->regs[31];
     regs->cp0_epc = depc;
-    __compute_return_epc(regs);
+    compute_return_epc(regs);
     depc = regs->cp0_epc;
     regs->cp0_epc = old_epc;
+    regs->regs[31] = old_ra;
     } else
         depc += 4;
     write_c0_depc(depc);
@@ -1392,9 +1475,24 @@ void __init *set_except_vector(int n, void *addr)
     unsigned long handler = (unsigned long) addr;
     unsigned long old_handler = exception_handlers[n];
 
+#ifdef CONFIG_CPU_MICROMIPS
+    /*
+     * Only the TLB handlers are cache aligned with an even
+     * address. All other handlers are on an odd address and
+     * require no modification. Otherwise, MIPS32 mode will
+     * be entered when handling any TLB exceptions. That
+     * would be bad...since we must stay in microMIPS mode.
+     */
+    if (!(handler & 0x1))
+        handler |= 1;
+#endif
     exception_handlers[n] = handler;
     if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+        unsigned long jump_mask = ~((1 << 27) - 1);
+#else
         unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
         u32 *buf = (u32 *)(ebase + 0x200);
         unsigned int k0 = 26;
         if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1421,17 +1519,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
     unsigned long handler;
     unsigned long old_handler = vi_handlers[n];
     int srssets = current_cpu_data.srsets;
-    u32 *w;
+    u16 *h;
     unsigned char *b;
 
     BUG_ON(!cpu_has_veic && !cpu_has_vint);
+    BUG_ON((n < 0) && (n > 9));
 
     if (addr == NULL) {
         handler = (unsigned long) do_default_vi;
         srs = 0;
     } else
         handler = (unsigned long) addr;
-    vi_handlers[n] = (unsigned long) addr;
+    vi_handlers[n] = handler;
 
     b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1450,9 +1549,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
     if (srs == 0) {
         /*
          * If no shadow set is selected then use the default handler
-         * that does normal register saving and a standard interrupt exit
+         * that does normal register saving and standard interrupt exit
          */
-
         extern char except_vec_vi, except_vec_vi_lui;
         extern char except_vec_vi_ori, except_vec_vi_end;
         extern char rollback_except_vec_vi;
@@ -1465,11 +1563,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
          * Status.IM bit to be masked before going there.
          */
         extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+        const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
         const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-        const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+        const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+        const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
         const int lui_offset = &except_vec_vi_lui - vec_start;
         const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+        const int handler_len = &except_vec_vi_end - vec_start;
 
         if (handler_len > VECTORSPACING) {
             /*
@@ -1479,30 +1586,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
             panic("VECTORSPACING too small");
         }
 
-        memcpy(b, vec_start, handler_len);
+        set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+                (handler_len - 1));
+#else
+                handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
         BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
 
-        w = (u32 *)(b + mori_offset);
-        *w = (*w & 0xffff0000) | (0x100 << n);
+        h = (u16 *)(b + mori_offset);
+        *h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-        w = (u32 *)(b + lui_offset);
-        *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-        w = (u32 *)(b + ori_offset);
-        *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+        h = (u16 *)(b + lui_offset);
+        *h = (handler >> 16) & 0xffff;
+        h = (u16 *)(b + ori_offset);
+        *h = (handler & 0xffff);
         local_flush_icache_range((unsigned long)b,
                      (unsigned long)(b+handler_len));
     }
     else {
         /*
-         * In other cases jump directly to the interrupt handler
-         *
-         * It is the handlers responsibility to save registers if required
-         * (eg hi/lo) and return from the exception using "eret"
+         * In other cases jump directly to the interrupt handler. It
+         * is the handler's responsibility to save registers if required
+         * (eg hi/lo) and return from the exception using "eret".
          */
-        w = (u32 *)b;
-        *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-        *w = 0;
+        u32 insn;
+
+        h = (u16 *)b;
+        /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+        insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+        insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+        h[0] = (insn >> 16) & 0xffff;
+        h[1] = insn & 0xffff;
+        h[2] = 0;
+        h[3] = 0;
         local_flush_icache_range((unsigned long)b,
                      (unsigned long)(b+8));
     }
@@ -1663,7 +1784,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+    memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
     memcpy((void *)(ebase + offset), addr, size);
+#endif
     local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
@@ -1697,8 +1822,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt); | |||
1697 | 1822 | ||
1698 | void __init trap_init(void) | 1823 | void __init trap_init(void) |
1699 | { | 1824 | { |
1700 | extern char except_vec3_generic, except_vec3_r4000; | 1825 | extern char except_vec3_generic; |
1701 | extern char except_vec4; | 1826 | extern char except_vec4; |
1827 | extern char except_vec3_r4000; | ||
1702 | unsigned long i; | 1828 | unsigned long i; |
1703 | int rollback; | 1829 | int rollback; |
1704 | 1830 | ||
@@ -1831,11 +1957,11 @@ void __init trap_init(void)
 
     if (cpu_has_vce)
         /* Special exception: R4[04]00 uses also the divec space. */
-        memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+        set_handler(0x180, &except_vec3_r4000, 0x100);
     else if (cpu_has_4kex)
-        memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+        set_handler(0x180, &except_vec3_generic, 0x80);
     else
-        memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+        set_handler(0x080, &except_vec3_generic, 0x80);
 
     local_flush_icache_range(ebase, ebase + 0x400);
     flush_tlb_handlers();
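Finally, the vectored-interrupt path in set_vi_srs_handler() shows the other recurring pattern of the patch: instruction patching is done through u16 pointers (microMIPS stubs are only guaranteed halfword alignment), and the jump written into the vector is encoded differently per ISA. The following is a hedged sketch of those two encodings, using only the constants visible in the hunk above; it is illustrative, not the kernel's code.

```c
#include <stdint.h>
#include <stdbool.h>

/* Encode "j handler" for either ISA (sketch; mirrors set_vi_srs_handler). */
static uint32_t encode_jump(uint32_t handler, bool micromips)
{
    if (micromips)  /* microMIPS j: 26-bit halfword index */
        return 0xd4000000u | ((handler & 0x07ffffffu) >> 1);
    /* classic MIPS j: 26-bit word index */
    return 0x08000000u | ((handler & 0x0fffffffu) >> 2);
}

/* Write the instruction as two halfword stores plus zero padding. */
static void patch_vector(uint16_t *h, uint32_t handler, bool micromips)
{
    uint32_t insn = encode_jump(handler, micromips);

    h[0] = (insn >> 16) & 0xffff;
    h[1] = insn & 0xffff;
    h[2] = 0;
    h[3] = 0;
}
```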