path: root/arch/mips/kernel/traps.c
author	Ralf Baechle <ralf@linux-mips.org>	2013-05-09 11:57:30 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2013-05-09 11:57:30 -0400
commit	b22d1b6a91ca4260f869e349179ae53f18c664db (patch)
tree	6ac6c2bd202100727638f02ae5037ec78144e8d5 /arch/mips/kernel/traps.c
parent	5e0e61dd2c89c673f89fb57dcd3cc746dc0c1706 (diff)
parent	0ab2b7d08ea7226dc72ff0f8c05f470566facf7c (diff)
Merge branch 'mti-next' of git://git.linux-mips.org/pub/scm/sjhill/linux-sjhill into mips-for-linux-next
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--	arch/mips/kernel/traps.c	| 302
1 files changed, 216 insertions, 86 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7a99e60dadbd..3c906e723fd4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-	struct mips_fpu_struct *ctx, int has_fpu,
-	void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define SYNC			0x0000000f
 #define RDHWR			0x0000003b
 
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC	0xfc00ffff
+#define MM_RDHWR	0x00006b3c
+#define MM_RS		0x001f0000
+#define MM_RT		0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
 	struct thread_info *ti = task_thread_info(current);
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+			1, regs, 0);
+	switch (rd) {
+	case 0:		/* CPU number */
+		regs->regs[rt] = smp_processor_id();
+		return 0;
+	case 1:		/* SYNCI length */
+		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+				     current_cpu_data.icache.linesz);
+		return 0;
+	case 2:		/* Read count register */
+		regs->regs[rt] = read_c0_count();
+		return 0;
+	case 3:		/* Count register resolution */
+		switch (current_cpu_data.cputype) {
+		case CPU_20KC:
+		case CPU_25KF:
+			regs->regs[rt] = 1;
+			break;
+		default:
+			regs->regs[rt] = 2;
+		}
+		return 0;
+	case 29:
+		regs->regs[rt] = ti->tp_value;
+		return 0;
+	default:
+		return -1;
+	}
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
 		int rt = (opcode & RT) >> 16;
-		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-				1, regs, 0);
-		switch (rd) {
-		case 0:		/* CPU number */
-			regs->regs[rt] = smp_processor_id();
-			return 0;
-		case 1:		/* SYNCI length */
-			regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-					     current_cpu_data.icache.linesz);
-			return 0;
-		case 2:		/* Read count register */
-			regs->regs[rt] = read_c0_count();
-			return 0;
-		case 3:		/* Count register resolution */
-			switch (current_cpu_data.cputype) {
-			case CPU_20KC:
-			case CPU_25KF:
-				regs->regs[rt] = 1;
-				break;
-			default:
-				regs->regs[rt] = 2;
-			}
-			return 0;
-		case 29:
-			regs->regs[rt] = ti->tp_value;
-			return 0;
-		default:
-			return -1;
-		}
+
+		simulate_rdhwr(regs, rd, rt);
+		return 0;
+	}
+
+	/* Not ours. */
+	return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+		int rd = (opcode & MM_RS) >> 16;
+		int rt = (opcode & MM_RT) >> 21;
+		simulate_rdhwr(regs, rd, rt);
+		return 0;
 	}
 
 	/* Not ours. */
@@ -675,7 +697,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	force_sig_info(SIGFPE, &info, current);
 }
 
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
 {
 	if (sig == SIGSEGV || sig == SIGBUS) {
 		struct siginfo si = {0};
@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
 	unsigned int opcode, bcode;
-
-	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
+	unsigned long epc;
+	u16 instr[2];
+
+	if (get_isa16_mode(regs->cp0_epc)) {
+		/* Calculate EPC. */
+		epc = exception_epc(regs);
+		if (cpu_has_mmips) {
+			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+				goto out_sigsegv;
+			opcode = (instr[0] << 16) | instr[1];
+		} else {
+			/* MIPS16e mode */
+			if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+				goto out_sigsegv;
+			bcode = (instr[0] >> 6) & 0x3f;
+			do_trap_or_bp(regs, bcode, "Break");
+			return;
+		}
+	} else {
+		if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+			goto out_sigsegv;
+	}
 
 	/*
 	 * There is the ancient bug in the MIPS assemblers that the break
@@ -869,13 +911,22 @@ out_sigsegv:
 asmlinkage void do_tr(struct pt_regs *regs)
 {
 	unsigned int opcode, tcode = 0;
+	u16 instr[2];
+	unsigned long epc = exception_epc(regs);
 
-	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
+	if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+	    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+		goto out_sigsegv;
+	opcode = (instr[0] << 16) | instr[1];
 
 	/* Immediate versions don't provide a code. */
-	if (!(opcode & OPCODE))
-		tcode = ((opcode >> 6) & ((1 << 10) - 1));
+	if (!(opcode & OPCODE)) {
+		if (get_isa16_mode(regs->cp0_epc))
+			/* microMIPS */
+			tcode = (opcode >> 12) & 0x1f;
+		else
+			tcode = ((opcode >> 6) & ((1 << 10) - 1));
+	}
 
 	do_trap_or_bp(regs, tcode, "Trap");
 	return;
@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 {
 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
 	unsigned long old_epc = regs->cp0_epc;
+	unsigned long old31 = regs->regs[31];
 	unsigned int opcode = 0;
 	int status = -1;
 
@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	if (unlikely(compute_return_epc(regs) < 0))
 		return;
 
-	if (unlikely(get_user(opcode, epc) < 0))
-		status = SIGSEGV;
-
-	if (!cpu_has_llsc && status < 0)
-		status = simulate_llsc(regs, opcode);
-
-	if (status < 0)
-		status = simulate_rdhwr(regs, opcode);
-
-	if (status < 0)
-		status = simulate_sync(regs, opcode);
+	if (get_isa16_mode(regs->cp0_epc)) {
+		unsigned short mmop[2] = { 0 };
+
+		if (unlikely(get_user(mmop[0], epc) < 0))
+			status = SIGSEGV;
+		if (unlikely(get_user(mmop[1], epc) < 0))
+			status = SIGSEGV;
+		opcode = (mmop[0] << 16) | mmop[1];
+
+		if (status < 0)
+			status = simulate_rdhwr_mm(regs, opcode);
+	} else {
+		if (unlikely(get_user(opcode, epc) < 0))
+			status = SIGSEGV;
+
+		if (!cpu_has_llsc && status < 0)
+			status = simulate_llsc(regs, opcode);
+
+		if (status < 0)
+			status = simulate_rdhwr_normal(regs, opcode);
+
+		if (status < 0)
+			status = simulate_sync(regs, opcode);
+	}
 
 	if (status < 0)
 		status = SIGILL;
 
 	if (unlikely(status > 0)) {
 		regs->cp0_epc = old_epc;	/* Undo skip-over. */
+		regs->regs[31] = old31;
 		force_sig(status, current);
 	}
 }
@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int __user *epc;
-	unsigned long old_epc;
+	unsigned long old_epc, old31;
 	unsigned int opcode;
 	unsigned int cpid;
 	int status;
@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	case 0:
 		epc = (unsigned int __user *)exception_epc(regs);
 		old_epc = regs->cp0_epc;
+		old31 = regs->regs[31];
 		opcode = 0;
 		status = -1;
 
 		if (unlikely(compute_return_epc(regs) < 0))
 			return;
 
-		if (unlikely(get_user(opcode, epc) < 0))
-			status = SIGSEGV;
-
-		if (!cpu_has_llsc && status < 0)
-			status = simulate_llsc(regs, opcode);
-
-		if (status < 0)
-			status = simulate_rdhwr(regs, opcode);
+		if (get_isa16_mode(regs->cp0_epc)) {
+			unsigned short mmop[2] = { 0 };
+
+			if (unlikely(get_user(mmop[0], epc) < 0))
+				status = SIGSEGV;
+			if (unlikely(get_user(mmop[1], epc) < 0))
+				status = SIGSEGV;
+			opcode = (mmop[0] << 16) | mmop[1];
+
+			if (status < 0)
+				status = simulate_rdhwr_mm(regs, opcode);
+		} else {
+			if (unlikely(get_user(opcode, epc) < 0))
+				status = SIGSEGV;
+
+			if (!cpu_has_llsc && status < 0)
+				status = simulate_llsc(regs, opcode);
+
+			if (status < 0)
+				status = simulate_rdhwr_normal(regs, opcode);
+		}
 
 		if (status < 0)
 			status = SIGILL;
 
 		if (unlikely(status > 0)) {
 			regs->cp0_epc = old_epc;	/* Undo skip-over. */
+			regs->regs[31] = old31;
 			force_sig(status, current);
 		}
 
@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void)
 void ejtag_exception_handler(struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
-	unsigned long depc, old_epc;
+	unsigned long depc, old_epc, old_ra;
 	unsigned int debug;
 
 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
 		 * calculation.
 		 */
 		old_epc = regs->cp0_epc;
+		old_ra = regs->regs[31];
 		regs->cp0_epc = depc;
-		__compute_return_epc(regs);
+		compute_return_epc(regs);
 		depc = regs->cp0_epc;
 		regs->cp0_epc = old_epc;
+		regs->regs[31] = old_ra;
 	} else
 		depc += 4;
 	write_c0_depc(depc);
@@ -1390,10 +1473,27 @@ unsigned long vi_handlers[64];
 void __init *set_except_vector(int n, void *addr)
 {
 	unsigned long handler = (unsigned long) addr;
-	unsigned long old_handler = xchg(&exception_handlers[n], handler);
+	unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * Only the TLB handlers are cache aligned with an even
+	 * address. All other handlers are on an odd address and
+	 * require no modification. Otherwise, MIPS32 mode will
+	 * be entered when handling any TLB exceptions. That
+	 * would be bad...since we must stay in microMIPS mode.
+	 */
+	if (!(handler & 0x1))
+		handler |= 1;
+#endif
+	old_handler = xchg(&exception_handlers[n], handler);
 
 	if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+		unsigned long jump_mask = ~((1 << 27) - 1);
+#else
 		unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
 		u32 *buf = (u32 *)(ebase + 0x200);
 		unsigned int k0 = 26;
 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1420,17 +1520,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
 	int srssets = current_cpu_data.srsets;
-	u32 *w;
+	u16 *h;
 	unsigned char *b;
 
 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
+	BUG_ON((n < 0) && (n > 9));
 
 	if (addr == NULL) {
 		handler = (unsigned long) do_default_vi;
 		srs = 0;
 	} else
 		handler = (unsigned long) addr;
-	vi_handlers[n] = (unsigned long) addr;
+	vi_handlers[n] = handler;
 
 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1449,9 +1550,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 	if (srs == 0) {
 		/*
 		 * If no shadow set is selected then use the default handler
-		 * that does normal register saving and a standard interrupt exit
+		 * that does normal register saving and standard interrupt exit
 		 */
-
 		extern char except_vec_vi, except_vec_vi_lui;
 		extern char except_vec_vi_ori, except_vec_vi_end;
 		extern char rollback_except_vec_vi;
@@ -1464,11 +1564,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 		 * Status.IM bit to be masked before going there.
 		 */
 		extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+		const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
 		const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-		const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
 		const int lui_offset = &except_vec_vi_lui - vec_start;
 		const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+		const int handler_len = &except_vec_vi_end - vec_start;
 
 		if (handler_len > VECTORSPACING) {
 			/*
@@ -1478,30 +1587,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 			panic("VECTORSPACING too small");
 		}
 
-		memcpy(b, vec_start, handler_len);
+		set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+				(handler_len - 1));
+#else
+				handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
 		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */
 
-		w = (u32 *)(b + mori_offset);
-		*w = (*w & 0xffff0000) | (0x100 << n);
+		h = (u16 *)(b + mori_offset);
+		*h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-		w = (u32 *)(b + lui_offset);
-		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-		w = (u32 *)(b + ori_offset);
-		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+		h = (u16 *)(b + lui_offset);
+		*h = (handler >> 16) & 0xffff;
+		h = (u16 *)(b + ori_offset);
+		*h = (handler & 0xffff);
 		local_flush_icache_range((unsigned long)b,
 					 (unsigned long)(b+handler_len));
 	}
 	else {
 		/*
-		 * In other cases jump directly to the interrupt handler
-		 *
-		 * It is the handlers responsibility to save registers if required
-		 * (eg hi/lo) and return from the exception using "eret"
+		 * In other cases jump directly to the interrupt handler. It
+		 * is the handler's responsibility to save registers if required
+		 * (eg hi/lo) and return from the exception using "eret".
 		 */
-		w = (u32 *)b;
-		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-		*w = 0;
+		u32 insn;
+
+		h = (u16 *)b;
+		/* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+		h[0] = (insn >> 16) & 0xffff;
+		h[1] = insn & 0xffff;
+		h[2] = 0;
+		h[3] = 0;
 		local_flush_icache_range((unsigned long)b,
 					 (unsigned long)(b+8));
 	}
@@ -1546,6 +1669,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
 	unsigned int hwrena = cpu_hwrena_impl_bits;
+	unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1629,8 +1753,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+	asid = ASID_FIRST_VERSION;
+	cpu_data[cpu].asid_cache = asid;
+	TLBMISS_HANDLER_SETUP();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -1660,7 +1785,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
 	memcpy((void *)(ebase + offset), addr, size);
+#endif
 	local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
@@ -1694,8 +1823,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
 
 void __init trap_init(void)
 {
-	extern char except_vec3_generic, except_vec3_r4000;
+	extern char except_vec3_generic;
 	extern char except_vec4;
+	extern char except_vec3_r4000;
 	unsigned long i;
 	int rollback;
 
@@ -1833,11 +1963,11 @@ void __init trap_init(void)
 
 	if (cpu_has_vce)
 		/* Special exception: R4[04]00 uses also the divec space. */
-		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+		set_handler(0x180, &except_vec3_r4000, 0x100);
 	else if (cpu_has_4kex)
-		memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+		set_handler(0x180, &except_vec3_generic, 0x80);
 	else
-		memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+		set_handler(0x080, &except_vec3_generic, 0x80);
 
 	local_flush_icache_range(ebase, ebase + 0x400);
 	flush_tlb_handlers();