diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
| -rw-r--r-- | arch/mips/kernel/traps.c | 212 |
1 files changed, 173 insertions, 39 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bed0eb6cf55d..4901f0a37fca 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <asm/watch.h> | 42 | #include <asm/watch.h> |
| 43 | #include <asm/types.h> | 43 | #include <asm/types.h> |
| 44 | 44 | ||
| 45 | extern asmlinkage void handle_int(void); | ||
| 45 | extern asmlinkage void handle_tlbm(void); | 46 | extern asmlinkage void handle_tlbm(void); |
| 46 | extern asmlinkage void handle_tlbl(void); | 47 | extern asmlinkage void handle_tlbl(void); |
| 47 | extern asmlinkage void handle_tlbs(void); | 48 | extern asmlinkage void handle_tlbs(void); |
| @@ -279,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock); | |||
| 279 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) | 280 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) |
| 280 | { | 281 | { |
| 281 | static int die_counter; | 282 | static int die_counter; |
| 283 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 284 | unsigned long dvpret = dvpe(); | ||
| 285 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 282 | 286 | ||
| 283 | console_verbose(); | 287 | console_verbose(); |
| 284 | spin_lock_irq(&die_lock); | 288 | spin_lock_irq(&die_lock); |
| 289 | bust_spinlocks(1); | ||
| 290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 291 | mips_mt_regdump(dvpret); | ||
| 292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 285 | printk("%s[#%d]:\n", str, ++die_counter); | 293 | printk("%s[#%d]:\n", str, ++die_counter); |
| 286 | show_registers(regs); | 294 | show_registers(regs); |
| 287 | spin_unlock_irq(&die_lock); | 295 | spin_unlock_irq(&die_lock); |
| @@ -750,12 +758,43 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 750 | ¤t->thread.fpu.soft); | 758 | ¤t->thread.fpu.soft); |
| 751 | if (sig) | 759 | if (sig) |
| 752 | force_sig(sig, current); | 760 | force_sig(sig, current); |
| 761 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
| 762 | else { | ||
| 763 | /* | ||
| 764 | * MIPS MT processors may have fewer FPU contexts | ||
| 765 | * than CPU threads. If we've emulated more than | ||
| 766 | * some threshold number of instructions, force | ||
| 767 | * migration to a "CPU" that has FP support. | ||
| 768 | */ | ||
| 769 | if(mt_fpemul_threshold > 0 | ||
| 770 | && ((current->thread.emulated_fp++ | ||
| 771 | > mt_fpemul_threshold))) { | ||
| 772 | /* | ||
| 773 | * If there's no FPU present, or if the | ||
| 774 | * application has already restricted | ||
| 775 | * the allowed set to exclude any CPUs | ||
| 776 | * with FPUs, we'll skip the procedure. | ||
| 777 | */ | ||
| 778 | if (cpus_intersects(current->cpus_allowed, | ||
| 779 | mt_fpu_cpumask)) { | ||
| 780 | cpumask_t tmask; | ||
| 781 | |||
| 782 | cpus_and(tmask, | ||
| 783 | current->thread.user_cpus_allowed, | ||
| 784 | mt_fpu_cpumask); | ||
| 785 | set_cpus_allowed(current, tmask); | ||
| 786 | current->thread.mflags |= MF_FPUBOUND; | ||
| 787 | } | ||
| 788 | } | ||
| 789 | } | ||
| 790 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
| 753 | } | 791 | } |
| 754 | 792 | ||
| 755 | return; | 793 | return; |
| 756 | 794 | ||
| 757 | case 2: | 795 | case 2: |
| 758 | case 3: | 796 | case 3: |
| 797 | die_if_kernel("do_cpu invoked from kernel context!", regs); | ||
| 759 | break; | 798 | break; |
| 760 | } | 799 | } |
| 761 | 800 | ||
| @@ -793,6 +832,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs) | |||
| 793 | 832 | ||
| 794 | asmlinkage void do_mt(struct pt_regs *regs) | 833 | asmlinkage void do_mt(struct pt_regs *regs) |
| 795 | { | 834 | { |
| 835 | int subcode; | ||
| 836 | |||
| 837 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
| 838 | |||
| 839 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) | ||
| 840 | >> VPECONTROL_EXCPT_SHIFT; | ||
| 841 | switch (subcode) { | ||
| 842 | case 0: | ||
| 843 | printk(KERN_ERR "Thread Underflow\n"); | ||
| 844 | break; | ||
| 845 | case 1: | ||
| 846 | printk(KERN_ERR "Thread Overflow\n"); | ||
| 847 | break; | ||
| 848 | case 2: | ||
| 849 | printk(KERN_ERR "Invalid YIELD Qualifier\n"); | ||
| 850 | break; | ||
| 851 | case 3: | ||
| 852 | printk(KERN_ERR "Gating Storage Exception\n"); | ||
| 853 | break; | ||
| 854 | case 4: | ||
| 855 | printk(KERN_ERR "YIELD Scheduler Exception\n"); | ||
| 856 | break; | ||
| 857 | case 5: | ||
| 858 | printk(KERN_ERR "Gating Storage Scheduler Exception\n"); | ||
| 859 | break; | ||
| 860 | default: | ||
| 861 | printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n", | ||
| 862 | subcode); | ||
| 863 | break; | ||
| 864 | } | ||
| 796 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | 865 | die_if_kernel("MIPS MT Thread exception in kernel", regs); |
| 797 | 866 | ||
| 798 | force_sig(SIGILL, current); | 867 | force_sig(SIGILL, current); |
| @@ -928,7 +997,15 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
| 928 | */ | 997 | */ |
| 929 | void nmi_exception_handler(struct pt_regs *regs) | 998 | void nmi_exception_handler(struct pt_regs *regs) |
| 930 | { | 999 | { |
| 1000 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1001 | unsigned long dvpret = dvpe(); | ||
| 1002 | bust_spinlocks(1); | ||
| 1003 | printk("NMI taken!!!!\n"); | ||
| 1004 | mips_mt_regdump(dvpret); | ||
| 1005 | #else | ||
| 1006 | bust_spinlocks(1); | ||
| 931 | printk("NMI taken!!!!\n"); | 1007 | printk("NMI taken!!!!\n"); |
| 1008 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 932 | die("NMI", regs); | 1009 | die("NMI", regs); |
| 933 | while(1) ; | 1010 | while(1) ; |
| 934 | } | 1011 | } |
| @@ -960,27 +1037,29 @@ void *set_except_vector(int n, void *addr) | |||
| 960 | 1037 | ||
| 961 | #ifdef CONFIG_CPU_MIPSR2 | 1038 | #ifdef CONFIG_CPU_MIPSR2 |
| 962 | /* | 1039 | /* |
| 963 | * Shadow register allocation | 1040 | * MIPSR2 shadow register set allocation |
| 964 | * FIXME: SMP... | 1041 | * FIXME: SMP... |
| 965 | */ | 1042 | */ |
| 966 | 1043 | ||
| 967 | /* MIPSR2 shadow register sets */ | 1044 | static struct shadow_registers { |
| 968 | struct shadow_registers { | 1045 | /* |
| 969 | spinlock_t sr_lock; /* */ | 1046 | * Number of shadow register sets supported |
| 970 | int sr_supported; /* Number of shadow register sets supported */ | 1047 | */ |
| 971 | int sr_allocated; /* Bitmap of allocated shadow registers */ | 1048 | unsigned long sr_supported; |
| 1049 | /* | ||
| 1050 | * Bitmap of allocated shadow registers | ||
| 1051 | */ | ||
| 1052 | unsigned long sr_allocated; | ||
| 972 | } shadow_registers; | 1053 | } shadow_registers; |
| 973 | 1054 | ||
| 974 | void mips_srs_init(void) | 1055 | static void mips_srs_init(void) |
| 975 | { | 1056 | { |
| 976 | #ifdef CONFIG_CPU_MIPSR2_SRS | 1057 | #ifdef CONFIG_CPU_MIPSR2_SRS |
| 977 | shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | 1058 | shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; |
| 978 | printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported); | 1059 | printk(KERN_INFO "%d MIPSR2 register sets available\n", |
| 979 | #else | 1060 | shadow_registers.sr_supported); |
| 980 | shadow_registers.sr_supported = 1; | ||
| 981 | #endif | 1061 | #endif |
| 982 | shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ | 1062 | shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ |
| 983 | spin_lock_init(&shadow_registers.sr_lock); | ||
| 984 | } | 1063 | } |
| 985 | 1064 | ||
| 986 | int mips_srs_max(void) | 1065 | int mips_srs_max(void) |
| @@ -988,38 +1067,30 @@ int mips_srs_max(void) | |||
| 988 | return shadow_registers.sr_supported; | 1067 | return shadow_registers.sr_supported; |
| 989 | } | 1068 | } |
| 990 | 1069 | ||
| 991 | int mips_srs_alloc (void) | 1070 | int mips_srs_alloc(void) |
| 992 | { | 1071 | { |
| 993 | struct shadow_registers *sr = &shadow_registers; | 1072 | struct shadow_registers *sr = &shadow_registers; |
| 994 | unsigned long flags; | ||
| 995 | int set; | 1073 | int set; |
| 996 | 1074 | ||
| 997 | spin_lock_irqsave(&sr->sr_lock, flags); | 1075 | again: |
| 1076 | set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported); | ||
| 1077 | if (set >= sr->sr_supported) | ||
| 1078 | return -1; | ||
| 998 | 1079 | ||
| 999 | for (set = 0; set < sr->sr_supported; set++) { | 1080 | if (test_and_set_bit(set, &sr->sr_allocated)) |
| 1000 | if ((sr->sr_allocated & (1 << set)) == 0) { | 1081 | goto again; |
| 1001 | sr->sr_allocated |= 1 << set; | ||
| 1002 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
| 1003 | return set; | ||
| 1004 | } | ||
| 1005 | } | ||
| 1006 | 1082 | ||
| 1007 | /* None available */ | 1083 | return set; |
| 1008 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
| 1009 | return -1; | ||
| 1010 | } | 1084 | } |
| 1011 | 1085 | ||
| 1012 | void mips_srs_free (int set) | 1086 | void mips_srs_free(int set) |
| 1013 | { | 1087 | { |
| 1014 | struct shadow_registers *sr = &shadow_registers; | 1088 | struct shadow_registers *sr = &shadow_registers; |
| 1015 | unsigned long flags; | ||
| 1016 | 1089 | ||
| 1017 | spin_lock_irqsave(&sr->sr_lock, flags); | 1090 | clear_bit(set, &sr->sr_allocated); |
| 1018 | sr->sr_allocated &= ~(1 << set); | ||
| 1019 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
| 1020 | } | 1091 | } |
| 1021 | 1092 | ||
| 1022 | void *set_vi_srs_handler (int n, void *addr, int srs) | 1093 | static void *set_vi_srs_handler(int n, void *addr, int srs) |
| 1023 | { | 1094 | { |
| 1024 | unsigned long handler; | 1095 | unsigned long handler; |
| 1025 | unsigned long old_handler = vi_handlers[n]; | 1096 | unsigned long old_handler = vi_handlers[n]; |
| @@ -1032,8 +1103,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs) | |||
| 1032 | if (addr == NULL) { | 1103 | if (addr == NULL) { |
| 1033 | handler = (unsigned long) do_default_vi; | 1104 | handler = (unsigned long) do_default_vi; |
| 1034 | srs = 0; | 1105 | srs = 0; |
| 1035 | } | 1106 | } else |
| 1036 | else | ||
| 1037 | handler = (unsigned long) addr; | 1107 | handler = (unsigned long) addr; |
| 1038 | vi_handlers[n] = (unsigned long) addr; | 1108 | vi_handlers[n] = (unsigned long) addr; |
| 1039 | 1109 | ||
| @@ -1045,8 +1115,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs) | |||
| 1045 | if (cpu_has_veic) { | 1115 | if (cpu_has_veic) { |
| 1046 | if (board_bind_eic_interrupt) | 1116 | if (board_bind_eic_interrupt) |
| 1047 | board_bind_eic_interrupt (n, srs); | 1117 | board_bind_eic_interrupt (n, srs); |
| 1048 | } | 1118 | } else if (cpu_has_vint) { |
| 1049 | else if (cpu_has_vint) { | ||
| 1050 | /* SRSMap is only defined if shadow sets are implemented */ | 1119 | /* SRSMap is only defined if shadow sets are implemented */ |
| 1051 | if (mips_srs_max() > 1) | 1120 | if (mips_srs_max() > 1) |
| 1052 | change_c0_srsmap (0xf << n*4, srs << n*4); | 1121 | change_c0_srsmap (0xf << n*4, srs << n*4); |
| @@ -1060,6 +1129,15 @@ void *set_vi_srs_handler (int n, void *addr, int srs) | |||
| 1060 | 1129 | ||
| 1061 | extern char except_vec_vi, except_vec_vi_lui; | 1130 | extern char except_vec_vi, except_vec_vi_lui; |
| 1062 | extern char except_vec_vi_ori, except_vec_vi_end; | 1131 | extern char except_vec_vi_ori, except_vec_vi_end; |
| 1132 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1133 | /* | ||
| 1134 | * We need to provide the SMTC vectored interrupt handler | ||
| 1135 | * not only with the address of the handler, but with the | ||
| 1136 | * Status.IM bit to be masked before going there. | ||
| 1137 | */ | ||
| 1138 | extern char except_vec_vi_mori; | ||
| 1139 | const int mori_offset = &except_vec_vi_mori - &except_vec_vi; | ||
| 1140 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1063 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | 1141 | const int handler_len = &except_vec_vi_end - &except_vec_vi; |
| 1064 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | 1142 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; |
| 1065 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | 1143 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; |
| @@ -1073,6 +1151,12 @@ void *set_vi_srs_handler (int n, void *addr, int srs) | |||
| 1073 | } | 1151 | } |
| 1074 | 1152 | ||
| 1075 | memcpy (b, &except_vec_vi, handler_len); | 1153 | memcpy (b, &except_vec_vi, handler_len); |
| 1154 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1155 | if (n > 7) | ||
| 1156 | printk("Vector index %d exceeds SMTC maximum\n", n); | ||
| 1157 | w = (u32 *)(b + mori_offset); | ||
| 1158 | *w = (*w & 0xffff0000) | (0x100 << n); | ||
| 1159 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1076 | w = (u32 *)(b + lui_offset); | 1160 | w = (u32 *)(b + lui_offset); |
| 1077 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | 1161 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); |
| 1078 | w = (u32 *)(b + ori_offset); | 1162 | w = (u32 *)(b + ori_offset); |
| @@ -1095,9 +1179,9 @@ void *set_vi_srs_handler (int n, void *addr, int srs) | |||
| 1095 | return (void *)old_handler; | 1179 | return (void *)old_handler; |
| 1096 | } | 1180 | } |
| 1097 | 1181 | ||
| 1098 | void *set_vi_handler (int n, void *addr) | 1182 | void *set_vi_handler(int n, void *addr) |
| 1099 | { | 1183 | { |
| 1100 | return set_vi_srs_handler (n, addr, 0); | 1184 | return set_vi_srs_handler(n, addr, 0); |
| 1101 | } | 1185 | } |
| 1102 | #endif | 1186 | #endif |
| 1103 | 1187 | ||
| @@ -1113,8 +1197,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc); | |||
| 1113 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); | 1197 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); |
| 1114 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); | 1198 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); |
| 1115 | 1199 | ||
| 1200 | #ifdef CONFIG_SMP | ||
| 1201 | static int smp_save_fp_context(struct sigcontext *sc) | ||
| 1202 | { | ||
| 1203 | return cpu_has_fpu | ||
| 1204 | ? _save_fp_context(sc) | ||
| 1205 | : fpu_emulator_save_context(sc); | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | static int smp_restore_fp_context(struct sigcontext *sc) | ||
| 1209 | { | ||
| 1210 | return cpu_has_fpu | ||
| 1211 | ? _restore_fp_context(sc) | ||
| 1212 | : fpu_emulator_restore_context(sc); | ||
| 1213 | } | ||
| 1214 | #endif | ||
| 1215 | |||
| 1116 | static inline void signal_init(void) | 1216 | static inline void signal_init(void) |
| 1117 | { | 1217 | { |
| 1218 | #ifdef CONFIG_SMP | ||
| 1219 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
| 1220 | save_fp_context = smp_save_fp_context; | ||
| 1221 | restore_fp_context = smp_restore_fp_context; | ||
| 1222 | #else | ||
| 1118 | if (cpu_has_fpu) { | 1223 | if (cpu_has_fpu) { |
| 1119 | save_fp_context = _save_fp_context; | 1224 | save_fp_context = _save_fp_context; |
| 1120 | restore_fp_context = _restore_fp_context; | 1225 | restore_fp_context = _restore_fp_context; |
| @@ -1122,6 +1227,7 @@ static inline void signal_init(void) | |||
| 1122 | save_fp_context = fpu_emulator_save_context; | 1227 | save_fp_context = fpu_emulator_save_context; |
| 1123 | restore_fp_context = fpu_emulator_restore_context; | 1228 | restore_fp_context = fpu_emulator_restore_context; |
| 1124 | } | 1229 | } |
| 1230 | #endif | ||
| 1125 | } | 1231 | } |
| 1126 | 1232 | ||
| 1127 | #ifdef CONFIG_MIPS32_COMPAT | 1233 | #ifdef CONFIG_MIPS32_COMPAT |
| @@ -1158,6 +1264,20 @@ void __init per_cpu_trap_init(void) | |||
| 1158 | { | 1264 | { |
| 1159 | unsigned int cpu = smp_processor_id(); | 1265 | unsigned int cpu = smp_processor_id(); |
| 1160 | unsigned int status_set = ST0_CU0; | 1266 | unsigned int status_set = ST0_CU0; |
| 1267 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1268 | int secondaryTC = 0; | ||
| 1269 | int bootTC = (cpu == 0); | ||
| 1270 | |||
| 1271 | /* | ||
| 1272 | * Only do per_cpu_trap_init() for first TC of Each VPE. | ||
| 1273 | * Note that this hack assumes that the SMTC init code | ||
| 1274 | * assigns TCs consecutively and in ascending order. | ||
| 1275 | */ | ||
| 1276 | |||
| 1277 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
| 1278 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | ||
| 1279 | secondaryTC = 1; | ||
| 1280 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1161 | 1281 | ||
| 1162 | /* | 1282 | /* |
| 1163 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1283 | * Disable coprocessors and select 32-bit or 64-bit addressing |
| @@ -1180,6 +1300,10 @@ void __init per_cpu_trap_init(void) | |||
| 1180 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | 1300 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ |
| 1181 | #endif | 1301 | #endif |
| 1182 | 1302 | ||
| 1303 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1304 | if (!secondaryTC) { | ||
| 1305 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1306 | |||
| 1183 | /* | 1307 | /* |
| 1184 | * Interrupt handling. | 1308 | * Interrupt handling. |
| 1185 | */ | 1309 | */ |
| @@ -1196,6 +1320,9 @@ void __init per_cpu_trap_init(void) | |||
| 1196 | } else | 1320 | } else |
| 1197 | set_c0_cause(CAUSEF_IV); | 1321 | set_c0_cause(CAUSEF_IV); |
| 1198 | } | 1322 | } |
| 1323 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1324 | } | ||
| 1325 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1199 | 1326 | ||
| 1200 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1327 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
| 1201 | TLBMISS_HANDLER_SETUP(); | 1328 | TLBMISS_HANDLER_SETUP(); |
| @@ -1205,8 +1332,14 @@ void __init per_cpu_trap_init(void) | |||
| 1205 | BUG_ON(current->mm); | 1332 | BUG_ON(current->mm); |
| 1206 | enter_lazy_tlb(&init_mm, current); | 1333 | enter_lazy_tlb(&init_mm, current); |
| 1207 | 1334 | ||
| 1208 | cpu_cache_init(); | 1335 | #ifdef CONFIG_MIPS_MT_SMTC |
| 1209 | tlb_init(); | 1336 | if (bootTC) { |
| 1337 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1338 | cpu_cache_init(); | ||
| 1339 | tlb_init(); | ||
| 1340 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 1341 | } | ||
| 1342 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 1210 | } | 1343 | } |
| 1211 | 1344 | ||
| 1212 | /* Install CPU exception handler */ | 1345 | /* Install CPU exception handler */ |
| @@ -1278,7 +1411,7 @@ void __init trap_init(void) | |||
| 1278 | if (cpu_has_veic || cpu_has_vint) { | 1411 | if (cpu_has_veic || cpu_has_vint) { |
| 1279 | int nvec = cpu_has_veic ? 64 : 8; | 1412 | int nvec = cpu_has_veic ? 64 : 8; |
| 1280 | for (i = 0; i < nvec; i++) | 1413 | for (i = 0; i < nvec; i++) |
| 1281 | set_vi_handler (i, NULL); | 1414 | set_vi_handler(i, NULL); |
| 1282 | } | 1415 | } |
| 1283 | else if (cpu_has_divec) | 1416 | else if (cpu_has_divec) |
| 1284 | set_handler(0x200, &except_vec4, 0x8); | 1417 | set_handler(0x200, &except_vec4, 0x8); |
| @@ -1297,6 +1430,7 @@ void __init trap_init(void) | |||
| 1297 | if (board_be_init) | 1430 | if (board_be_init) |
| 1298 | board_be_init(); | 1431 | board_be_init(); |
| 1299 | 1432 | ||
| 1433 | set_except_vector(0, handle_int); | ||
| 1300 | set_except_vector(1, handle_tlbm); | 1434 | set_except_vector(1, handle_tlbm); |
| 1301 | set_except_vector(2, handle_tlbl); | 1435 | set_except_vector(2, handle_tlbl); |
| 1302 | set_except_vector(3, handle_tlbs); | 1436 | set_except_vector(3, handle_tlbs); |
