Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--  arch/mips/kernel/traps.c  232
1 file changed, 191 insertions(+), 41 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bed0eb6cf55d..a7564b08eb4d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -42,6 +42,7 @@
 #include <asm/watch.h>
 #include <asm/types.h>
 
+extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_tlbm(void);
 extern asmlinkage void handle_tlbl(void);
 extern asmlinkage void handle_tlbs(void);
@@ -279,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	mips_mt_regdump(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	spin_unlock_irq(&die_lock);
@@ -750,12 +758,43 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 					       &current->thread.fpu.soft);
 		if (sig)
 			force_sig(sig, current);
+#ifdef CONFIG_MIPS_MT_FPAFF
+		else {
+			/*
+			 * MIPS MT processors may have fewer FPU contexts
+			 * than CPU threads. If we've emulated more than
+			 * some threshold number of instructions, force
+			 * migration to a "CPU" that has FP support.
+			 */
+			if (mt_fpemul_threshold > 0 &&
+			    (current->thread.emulated_fp++
+			     > mt_fpemul_threshold)) {
+				/*
+				 * If there's no FPU present, or if the
+				 * application has already restricted
+				 * the allowed set to exclude any CPUs
+				 * with FPUs, we'll skip the procedure.
+				 */
+				if (cpus_intersects(current->cpus_allowed,
+						    mt_fpu_cpumask)) {
+					cpumask_t tmask;
+
+					cpus_and(tmask,
+						 current->thread.user_cpus_allowed,
+						 mt_fpu_cpumask);
+					set_cpus_allowed(current, tmask);
+					current->thread.mflags |= MF_FPUBOUND;
+				}
+			}
+		}
+#endif /* CONFIG_MIPS_MT_FPAFF */
 	}
 
 	return;
 
 	case 2:
 	case 3:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		break;
 	}
 
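The FPAFF hunk above implements a soft-migration policy: count emulated FP instructions and, once a threshold is crossed, narrow the task's affinity to the FPU-capable CPUs, provided that intersection is non-empty. A minimal user-space sketch of the mask arithmetic follows; plain bitmasks stand in for cpumask_t and every name and value is invented for illustration, so this is not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* one bit per CPU; values are illustrative */
    #define FPU_CPUS 0x5u                   /* CPUs 0 and 2 have FPU contexts */

    struct task {
            uint32_t allowed;               /* current affinity mask */
            unsigned emulated_fp;           /* emulated FP instructions so far */
    };

    /* returns 1 if the task was bound to FPU-capable CPUs */
    static int maybe_bind_to_fpu(struct task *t, unsigned threshold)
    {
            if (threshold == 0 || t->emulated_fp++ <= threshold)
                    return 0;
            if (!(t->allowed & FPU_CPUS))   /* cpus_intersects(): nothing to bind to */
                    return 0;
            t->allowed &= FPU_CPUS;         /* cpus_and() + set_cpus_allowed() */
            return 1;
    }

    int main(void)
    {
            struct task t = { .allowed = 0x7, .emulated_fp = 100 };

            if (maybe_bind_to_fpu(&t, 50))
                    printf("bound, allowed mask now %#x\n", t.allowed);  /* 0x5 */
            return 0;
    }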
@@ -780,19 +819,64 @@ asmlinkage void do_watch(struct pt_regs *regs)
 
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
+	const int field = 2 * sizeof(unsigned long);
+	int multi_match = regs->cp0_status & ST0_TS;
+
 	show_regs(regs);
-	dump_tlb_all();
+
+	if (multi_match) {
+		printk("Index   : %0x\n", read_c0_index());
+		printk("Pagemask: %0x\n", read_c0_pagemask());
+		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
+		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+		printk("\n");
+		dump_tlb_all();
+	}
+
+	show_code((unsigned int *) regs->cp0_epc);
+
 	/*
 	 * Some chips may have other causes of machine check (e.g. SB1
 	 * graduation timer)
 	 */
 	panic("Caught Machine Check exception - %scaused by multiple "
 	      "matching entries in the TLB.",
-	      (regs->cp0_status & ST0_TS) ? "" : "not ");
+	      (multi_match) ? "" : "not ");
 }
 
 asmlinkage void do_mt(struct pt_regs *regs)
 {
+	int subcode;
+
+	die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+		  >> VPECONTROL_EXCPT_SHIFT;
+	switch (subcode) {
+	case 0:
+		printk(KERN_ERR "Thread Underflow\n");
+		break;
+	case 1:
+		printk(KERN_ERR "Thread Overflow\n");
+		break;
+	case 2:
+		printk(KERN_ERR "Invalid YIELD Qualifier\n");
+		break;
+	case 3:
+		printk(KERN_ERR "Gating Storage Exception\n");
+		break;
+	case 4:
+		printk(KERN_ERR "YIELD Scheduler Exception\n");
+		break;
+	case 5:
+		printk(KERN_ERR "Gating Storage Scheduler Exception\n");
+		break;
+	default:
+		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+		       subcode);
+		break;
+	}
 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
 	force_sig(SIGILL, current);
@@ -833,6 +917,7 @@ static inline void parity_protection_init(void)
 {
 	switch (current_cpu_data.cputype) {
 	case CPU_24K:
+	case CPU_34K:
 	case CPU_5KC:
 		write_c0_ecc(0x80000000);
 		back_to_back_c0_hazard();
@@ -928,7 +1013,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
  */
 void nmi_exception_handler(struct pt_regs *regs)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+	bust_spinlocks(1);
 	printk("NMI taken!!!!\n");
+	mips_mt_regdump(dvpret);
+#else
+	bust_spinlocks(1);
+	printk("NMI taken!!!!\n");
+#endif /* CONFIG_MIPS_MT_SMTC */
 	die("NMI", regs);
 	while(1) ;
 }
@@ -960,27 +1053,29 @@ void *set_except_vector(int n, void *addr)
 
 #ifdef CONFIG_CPU_MIPSR2
 /*
- * Shadow register allocation
+ * MIPSR2 shadow register set allocation
  * FIXME: SMP...
  */
 
-/* MIPSR2 shadow register sets */
-struct shadow_registers {
-	spinlock_t sr_lock;	/*  */
-	int sr_supported;	/* Number of shadow register sets supported */
-	int sr_allocated;	/* Bitmap of allocated shadow registers */
+static struct shadow_registers {
+	/*
+	 * Number of shadow register sets supported
+	 */
+	unsigned long sr_supported;
+	/*
+	 * Bitmap of allocated shadow registers
+	 */
+	unsigned long sr_allocated;
 } shadow_registers;
 
-void mips_srs_init(void)
+static void mips_srs_init(void)
 {
 #ifdef CONFIG_CPU_MIPSR2_SRS
 	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
-	printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
-#else
-	shadow_registers.sr_supported = 1;
+	printk(KERN_INFO "%d MIPSR2 register sets available\n",
+	       shadow_registers.sr_supported);
 #endif
 	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
-	spin_lock_init(&shadow_registers.sr_lock);
 }
 
 int mips_srs_max(void)
@@ -988,38 +1083,30 @@ int mips_srs_max(void)
 	return shadow_registers.sr_supported;
 }
 
-int mips_srs_alloc (void)
+int mips_srs_alloc(void)
 {
 	struct shadow_registers *sr = &shadow_registers;
-	unsigned long flags;
 	int set;
 
-	spin_lock_irqsave(&sr->sr_lock, flags);
+again:
+	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
+	if (set >= sr->sr_supported)
+		return -1;
 
-	for (set = 0; set < sr->sr_supported; set++) {
-		if ((sr->sr_allocated & (1 << set)) == 0) {
-			sr->sr_allocated |= 1 << set;
-			spin_unlock_irqrestore(&sr->sr_lock, flags);
-			return set;
-		}
-	}
+	if (test_and_set_bit(set, &sr->sr_allocated))
+		goto again;
 
-	/* None available */
-	spin_unlock_irqrestore(&sr->sr_lock, flags);
-	return -1;
+	return set;
 }
 
-void mips_srs_free (int set)
+void mips_srs_free(int set)
 {
 	struct shadow_registers *sr = &shadow_registers;
-	unsigned long flags;
 
-	spin_lock_irqsave(&sr->sr_lock, flags);
-	sr->sr_allocated &= ~(1 << set);
-	spin_unlock_irqrestore(&sr->sr_lock, flags);
+	clear_bit(set, &sr->sr_allocated);
 }
 
-void *set_vi_srs_handler (int n, void *addr, int srs)
+static void *set_vi_srs_handler(int n, void *addr, int srs)
 {
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
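The rewritten allocator drops the spinlock in favour of an atomic bitmap: find_first_zero_bit() nominates a free set and test_and_set_bit() claims it, retrying when another CPU wins the race. The same pattern in portable C11 atomics, as a user-space sketch (bitmap_alloc_set() and NSETS are invented names, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NSETS 4                                 /* pretend 4 shadow sets exist */
    static _Atomic unsigned long allocated = 1;     /* set 0 reserved, as in the patch */

    static int bitmap_alloc_set(void)
    {
            unsigned long map;
            int set;

    again:
            map = atomic_load(&allocated);
            for (set = 0; set < NSETS; set++)       /* find_first_zero_bit() */
                    if (!(map & (1UL << set)))
                            break;
            if (set >= NSETS)
                    return -1;                      /* none free */

            /* test_and_set_bit(): retry if another thread won the race */
            if (atomic_fetch_or(&allocated, 1UL << set) & (1UL << set))
                    goto again;

            return set;
    }

    int main(void)
    {
            printf("got set %d\n", bitmap_alloc_set());     /* prints 1: set 0 is reserved */
            return 0;
    }

The retry loop is what the old spinlock bought for free: between the scan and the claim another CPU can take the same bit, and the atomic fetch-or detects exactly that case.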
@@ -1032,8 +1119,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
 	if (addr == NULL) {
 		handler = (unsigned long) do_default_vi;
 		srs = 0;
-	}
-	else
+	} else
 		handler = (unsigned long) addr;
 	vi_handlers[n] = (unsigned long) addr;
 
@@ -1045,8 +1131,7 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
 	if (cpu_has_veic) {
 		if (board_bind_eic_interrupt)
 			board_bind_eic_interrupt (n, srs);
-	}
-	else if (cpu_has_vint) {
+	} else if (cpu_has_vint) {
 		/* SRSMap is only defined if shadow sets are implemented */
 		if (mips_srs_max() > 1)
 			change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1060,6 +1145,15 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
 
 		extern char except_vec_vi, except_vec_vi_lui;
 		extern char except_vec_vi_ori, except_vec_vi_end;
+#ifdef CONFIG_MIPS_MT_SMTC
+		/*
+		 * We need to provide the SMTC vectored interrupt handler
+		 * not only with the address of the handler, but with the
+		 * Status.IM bit to be masked before going there.
+		 */
+		extern char except_vec_vi_mori;
+		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+#endif /* CONFIG_MIPS_MT_SMTC */
 		const int handler_len = &except_vec_vi_end - &except_vec_vi;
 		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
 		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1073,6 +1167,12 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
 		}
 
 		memcpy (b, &except_vec_vi, handler_len);
+#ifdef CONFIG_MIPS_MT_SMTC
+		if (n > 7)
+			printk("Vector index %d exceeds SMTC maximum\n", n);
+		w = (u32 *)(b + mori_offset);
+		*w = (*w & 0xffff0000) | (0x100 << n);
+#endif /* CONFIG_MIPS_MT_SMTC */
 		w = (u32 *)(b + lui_offset);
 		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
 		w = (u32 *)(b + ori_offset);
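The handler stub copied by memcpy() is then patched in place: the 16-bit immediate fields of its lui/ori pair receive the two halves of the handler address, and under SMTC one more instruction gets the Status.IM bit for this vector. A sketch of the immediate split with an invented handler address (the k0 register encoding is an assumption for illustration, not taken from except_vec_vi):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical handler address; any 32-bit kernel address works */
            uint32_t handler  = 0x80104abc;
            uint32_t lui_insn = 0x3c1a0000;  /* lui k0, 0 - immediate filled below */
            uint32_t ori_insn = 0x375a0000;  /* ori k0, k0, 0 */

            /*
             * lui writes the upper half, ori ORs in the zero-extended lower
             * half, so no carry correction is needed (a lui/addiu pair would
             * need one, because addiu sign-extends its immediate).
             */
            lui_insn = (lui_insn & 0xffff0000) | ((handler >> 16) & 0xffff);
            ori_insn = (ori_insn & 0xffff0000) | (handler & 0xffff);

            printf("lui: %08x  ori: %08x\n", lui_insn, ori_insn);  /* 3c1a8010  375a4abc */
            return 0;
    }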
@@ -1095,9 +1195,9 @@ void *set_vi_srs_handler (int n, void *addr, int srs)
 	return (void *)old_handler;
 }
 
-void *set_vi_handler (int n, void *addr)
+void *set_vi_handler(int n, void *addr)
 {
-	return set_vi_srs_handler (n, addr, 0);
+	return set_vi_srs_handler(n, addr, 0);
 }
 #endif
 
@@ -1113,8 +1213,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
 
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _save_fp_context(sc)
+	       : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _restore_fp_context(sc)
+	       : fpu_emulator_restore_context(sc);
+}
+#endif
+
 static inline void signal_init(void)
 {
+#ifdef CONFIG_SMP
+	/* For now just do the cpu_has_fpu check when the functions are invoked */
+	save_fp_context = smp_save_fp_context;
+	restore_fp_context = smp_restore_fp_context;
+#else
 	if (cpu_has_fpu) {
 		save_fp_context = _save_fp_context;
 		restore_fp_context = _restore_fp_context;
@@ -1122,6 +1243,7 @@ static inline void signal_init(void)
 		save_fp_context = fpu_emulator_save_context;
 		restore_fp_context = fpu_emulator_restore_context;
 	}
+#endif
 }
 
 #ifdef CONFIG_MIPS32_COMPAT
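signal_init() binds save_fp_context/restore_fp_context once at boot; the new CONFIG_SMP wrappers instead re-test cpu_has_fpu on every call, since the CPU executing the signal path may differ per invocation. A stripped-down user-space sketch of that dispatch choice (the names and the SMP macro are simplified stand-ins, not the kernel's):

    #include <stdio.h>

    struct sigcontext;                      /* opaque for the sketch */

    static int cpu_has_fpu;                 /* stand-in for the kernel's test */

    static int hw_save(struct sigcontext *sc)  { (void)sc; puts("hardware save"); return 0; }
    static int emu_save(struct sigcontext *sc) { (void)sc; puts("emulator save"); return 0; }

    /* SMP path: decide per call, because the executing CPU may differ each time */
    static int smp_save(struct sigcontext *sc)
    {
            return cpu_has_fpu ? hw_save(sc) : emu_save(sc);
    }

    static int (*save_fp_context)(struct sigcontext *sc);

    int main(void)
    {
    #ifdef SMP                              /* simplified stand-in for CONFIG_SMP */
            save_fp_context = smp_save;     /* binding fixed, test deferred to call time */
    #else
            save_fp_context = cpu_has_fpu ? hw_save : emu_save;  /* test done once */
    #endif
            return save_fp_context(NULL);
    }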
@@ -1158,6 +1280,20 @@ void __init per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+#ifdef CONFIG_MIPS_MT_SMTC
+	int secondaryTC = 0;
+	int bootTC = (cpu == 0);
+
+	/*
+	 * Only do per_cpu_trap_init() for first TC of each VPE.
+	 * Note that this hack assumes that the SMTC init code
+	 * assigns TCs consecutively and in ascending order.
+	 */
+
+	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
+		secondaryTC = 1;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/*
 	 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1180,6 +1316,10 @@ void __init per_cpu_trap_init(void)
 	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (!secondaryTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	/*
 	 * Interrupt handling.
 	 */
@@ -1196,6 +1336,9 @@ void __init per_cpu_trap_init(void)
 	} else
 		set_c0_cause(CAUSEF_IV);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 	TLBMISS_HANDLER_SETUP();
@@ -1205,8 +1348,14 @@ void __init per_cpu_trap_init(void)
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
 
-	cpu_cache_init();
-	tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (bootTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+		cpu_cache_init();
+		tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /* Install CPU exception handler */
1211 1360
1212/* Install CPU exception handler */ 1361/* Install CPU exception handler */
@@ -1278,7 +1427,7 @@ void __init trap_init(void)
 	if (cpu_has_veic || cpu_has_vint) {
 		int nvec = cpu_has_veic ? 64 : 8;
 		for (i = 0; i < nvec; i++)
-			set_vi_handler (i, NULL);
+			set_vi_handler(i, NULL);
 	}
 	else if (cpu_has_divec)
 		set_handler(0x200, &except_vec4, 0x8);
@@ -1297,6 +1446,7 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();
 
+	set_except_vector(0, handle_int);
 	set_except_vector(1, handle_tlbm);
 	set_except_vector(2, handle_tlbl);
 	set_except_vector(3, handle_tlbs);