author     Ralf Baechle <ralf@linux-mips.org>    2006-04-05 04:45:45 -0400
committer  Ralf Baechle <ralf@linux-mips.org>    2006-04-18 22:14:28 -0400
commit     41c594ab65fc89573af296d192aa5235d09717ab (patch)
tree       562462512a320f386bdf49eabfbb26bb3ee761fa /arch/mips/kernel/traps.c
parent     2600990e640e3bef29ed89d565864cf16ee83833 (diff)
[MIPS] MT: Improved multithreading support.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--   arch/mips/kernel/traps.c   124
1 file changed, 116 insertions(+), 8 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 081e6ed5bb62..6336fe8008ec 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	mips_mt_regdump(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	spin_unlock_irq(&die_lock);
@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 	case 2:
 	case 3:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		break;
 	}
 
@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 
 asmlinkage void do_mt(struct pt_regs *regs)
 {
+	int subcode;
+
+	die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+			>> VPECONTROL_EXCPT_SHIFT;
+	switch (subcode) {
+	case 0:
+		printk(KERN_ERR "Thread Underflow\n");
+		break;
+	case 1:
+		printk(KERN_ERR "Thread Overflow\n");
+		break;
+	case 2:
+		printk(KERN_ERR "Invalid YIELD Qualifier\n");
+		break;
+	case 3:
+		printk(KERN_ERR "Gating Storage Exception\n");
+		break;
+	case 4:
+		printk(KERN_ERR "YIELD Scheduler Exception\n");
+		break;
+	case 5:
+		printk(KERN_ERR "Gating Storage Schedulier Exception\n");
+		break;
+	default:
+		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+			subcode);
+		break;
+	}
 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
 	force_sig(SIGILL, current);
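The new do_mt() handler classifies an MT ASE thread exception by the EXCPT field of the per-VPE VPEControl register and prints a name for each subcode. Below is a standalone userspace sketch of the same decode written as a lookup table; the VPECONTROL_EXCPT mask and shift values are assumed here for illustration rather than taken from asm/mipsmtregs.h, and decode_mt_exception() is a hypothetical helper that only mirrors the switch in the hunk above.

#include <stdio.h>

/* Illustrative values only: the kernel takes VPECONTROL_EXCPT and
 * VPECONTROL_EXCPT_SHIFT from asm/mipsmtregs.h. */
#define VPECONTROL_EXCPT_SHIFT	16
#define VPECONTROL_EXCPT	(0x7u << VPECONTROL_EXCPT_SHIFT)

static const char *const mt_excpt_names[] = {
	"Thread Underflow",
	"Thread Overflow",
	"Invalid YIELD Qualifier",
	"Gating Storage Exception",
	"YIELD Scheduler Exception",
	"Gating Storage Scheduler Exception",
};

/* Table-driven equivalent of the switch added to do_mt(). */
static void decode_mt_exception(unsigned int vpecontrol)
{
	unsigned int subcode = (vpecontrol & VPECONTROL_EXCPT)
				>> VPECONTROL_EXCPT_SHIFT;

	if (subcode < sizeof(mt_excpt_names) / sizeof(mt_excpt_names[0]))
		printf("%s\n", mt_excpt_names[subcode]);
	else
		printf("*** UNKNOWN THREAD EXCEPTION %u ***\n", subcode);
}

int main(void)
{
	decode_mt_exception(2u << VPECONTROL_EXCPT_SHIFT);	/* Invalid YIELD Qualifier */
	decode_mt_exception(7u << VPECONTROL_EXCPT_SHIFT);	/* unknown subcode */
	return 0;
}

A table keeps the subcode-to-name mapping in one place; the kernel's explicit switch reads more naturally if individual cases later need different handling.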
@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
  */
 void nmi_exception_handler(struct pt_regs *regs)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+	bust_spinlocks(1);
+	printk("NMI taken!!!!\n");
+	mips_mt_regdump(dvpret);
+#else
+	bust_spinlocks(1);
 	printk("NMI taken!!!!\n");
+#endif /* CONFIG_MIPS_MT_SMTC */
 	die("NMI", regs);
 	while(1) ;
 }
@@ -1007,7 +1053,7 @@ again:
 	return set;
 }
 
-void mips_srs_free (int set)
+void mips_srs_free(int set)
 {
 	struct shadow_registers *sr = &shadow_registers;
 
@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 	if (addr == NULL) {
 		handler = (unsigned long) do_default_vi;
 		srs = 0;
-	}
-	else
+	} else
 		handler = (unsigned long) addr;
 	vi_handlers[n] = (unsigned long) addr;
 
@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 	if (cpu_has_veic) {
 		if (board_bind_eic_interrupt)
 			board_bind_eic_interrupt (n, srs);
-	}
-	else if (cpu_has_vint) {
+	} else if (cpu_has_vint) {
 		/* SRSMap is only defined if shadow sets are implemented */
 		if (mips_srs_max() > 1)
 			change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 
 		extern char except_vec_vi, except_vec_vi_lui;
 		extern char except_vec_vi_ori, except_vec_vi_end;
+#ifdef CONFIG_MIPS_MT_SMTC
+		/*
+		 * We need to provide the SMTC vectored interrupt handler
+		 * not only with the address of the handler, but with the
+		 * Status.IM bit to be masked before going there.
+		 */
+		extern char except_vec_vi_mori;
+		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+#endif /* CONFIG_MIPS_MT_SMTC */
 		const int handler_len = &except_vec_vi_end - &except_vec_vi;
 		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
 		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 		}
 
 		memcpy (b, &except_vec_vi, handler_len);
+#ifdef CONFIG_MIPS_MT_SMTC
+		if (n > 7)
+			printk("Vector index %d exceeds SMTC maximum\n", n);
+		w = (u32 *)(b + mori_offset);
+		*w = (*w & 0xffff0000) | (0x100 << n);
+#endif /* CONFIG_MIPS_MT_SMTC */
 		w = (u32 *)(b + lui_offset);
 		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
 		w = (u32 *)(b + ori_offset);
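set_vi_srs_handler() copies the except_vec_vi stub into the vector area and then patches the 16-bit immediates of selected instructions inside the copy: the lui/ori pair receives the high and low halves of the handler address, and for SMTC the extra except_vec_vi_mori slot receives the Status.IM mask of the vector being installed. Since IM0..IM7 sit in bits 8..15 of Status, the mask for interrupt n is 0x100 << n, which is why the code warns when n > 7. A small host-side sketch of that in-place immediate patching follows; the stub words and handler address are made-up placeholders, not real MIPS encodings.

#include <stdio.h>
#include <stdint.h>

/* Overwrite the low 16 bits (the immediate field of an I-type MIPS
 * instruction such as lui/ori) while preserving opcode and registers. */
static void patch_imm16(uint32_t *insn, uint16_t imm)
{
	*insn = (*insn & 0xffff0000u) | imm;
}

int main(void)
{
	/* Pretend copy of the vectored-interrupt stub: a lui slot, the SMTC
	 * "mori" slot and an ori slot.  These words are placeholders, not
	 * real assembled instructions. */
	uint32_t stub[3] = { 0x3c1a0000u, 0x34210000u, 0x375a0000u };
	unsigned long handler = 0x80103450ul;	/* made-up handler address */
	int n = 3;				/* vector / interrupt number */

	patch_imm16(&stub[0], (handler >> 16) & 0xffff);	/* %hi(handler) */
	patch_imm16(&stub[1], 0x100 << n);			/* Status.IM bit for n */
	patch_imm16(&stub[2], handler & 0xffff);		/* %lo(handler) */

	for (int i = 0; i < 3; i++)
		printf("insn %d: 0x%08x\n", i, (unsigned)stub[i]);
	return 0;
}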
@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 	return (void *)old_handler;
 }
 
-void *set_vi_handler (int n, void *addr)
+void *set_vi_handler(int n, void *addr)
 {
 	return set_vi_srs_handler(n, addr, 0);
 }
@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
 
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _save_fp_context(sc)
+	       : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _restore_fp_context(sc)
+	       : fpu_emulator_restore_context(sc);
+}
+#endif
+
 static inline void signal_init(void)
 {
+#ifdef CONFIG_SMP
+	/* For now just do the cpu_has_fpu check when the functions are invoked */
+	save_fp_context = smp_save_fp_context;
+	restore_fp_context = smp_restore_fp_context;
+#else
 	if (cpu_has_fpu) {
 		save_fp_context = _save_fp_context;
 		restore_fp_context = _restore_fp_context;
@@ -1117,6 +1197,7 @@ static inline void signal_init(void)
 		save_fp_context = fpu_emulator_save_context;
 		restore_fp_context = fpu_emulator_restore_context;
 	}
+#endif
 }
 
 #ifdef CONFIG_MIPS32_COMPAT
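On SMP kernels, signal_init() no longer binds save_fp_context/restore_fp_context to the hardware or emulator routines once at boot; it installs wrappers that test cpu_has_fpu on every call, which (per the in-line comment) is a stop-gap until a cheaper per-CPU decision is wired up. The following userspace sketch illustrates that late-binding pattern with both backends and the FPU test stubbed out; none of the symbols below are the kernel's own definitions.

#include <stdio.h>

struct sigcontext;			/* opaque in this sketch */

static int cpu_has_fpu = 1;		/* stand-in for the CPU feature test */

static int _save_fp_context(struct sigcontext *sc)
{
	(void)sc;
	return puts("hardware FPU context save");
}

static int fpu_emulator_save_context(struct sigcontext *sc)
{
	(void)sc;
	return puts("emulator context save");
}

/* The indirection the signal code actually calls through. */
static int (*save_fp_context)(struct sigcontext *sc);

/* SMP variant: decide at call time, so one pointer value serves CPUs
 * with and without a hardware FPU. */
static int smp_save_fp_context(struct sigcontext *sc)
{
	return cpu_has_fpu ? _save_fp_context(sc)
			   : fpu_emulator_save_context(sc);
}

static void signal_init(void)
{
#ifdef CONFIG_SMP
	save_fp_context = smp_save_fp_context;		/* bind late */
#else
	save_fp_context = cpu_has_fpu ? _save_fp_context
				      : fpu_emulator_save_context;
#endif
}

int main(void)
{
	signal_init();
	save_fp_context(NULL);
	return 0;
}

The trade-off is one extra test per call in exchange for not committing, at init time, to a single answer that has to hold for every CPU.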
@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+#ifdef CONFIG_MIPS_MT_SMTC
+	int secondaryTC = 0;
+	int bootTC = (cpu == 0);
+
+	/*
+	 * Only do per_cpu_trap_init() for first TC of Each VPE.
+	 * Note that this hack assumes that the SMTC init code
+	 * assigns TCs consecutively and in ascending order.
+	 */
+
+	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
+		secondaryTC = 1;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/*
 	 * Disable coprocessors and select 32-bit or 64-bit addressing
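With SMTC, several TCs on the same VPE show up as separate CPUs, but the per-VPE parts of per_cpu_trap_init() should run only once per VPE. The added check reads the TCBind register: a TC is treated as secondary when its TC index is non-zero and it is bound to the same VPE as the previously initialised CPU, relying (as the comment notes) on TCs being assigned consecutively and in ascending order. Below is a host-side sketch of that predicate; the TCBind field masks, the fake_cpu struct and is_secondary_tc() are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

/* Assumed TCBind layout for illustration; the kernel uses
 * TCBIND_CURTC and TCBIND_CURVPE from asm/mipsmtregs.h. */
#define TCBIND_CURVPE		0x0000000f	/* VPE this TC is bound to */
#define TCBIND_CURTC		0x1fe00000	/* index of this TC        */
#define TCBIND_CURTC_SHIFT	21

struct fake_cpu { unsigned int vpe_id; };

/* Mirrors the check added to per_cpu_trap_init(): skip the per-VPE
 * setup when this TC is not the first one bound to its VPE. */
static int is_secondary_tc(unsigned int tcbind, unsigned int cpu,
			   const struct fake_cpu *cpu_data)
{
	return ((tcbind & TCBIND_CURTC) != 0) &&
	       ((tcbind & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id);
}

int main(void)
{
	/* Two VPEs with two TCs each: cpus 0/1 on VPE 0, cpus 2/3 on VPE 1. */
	struct fake_cpu cpu_data[4] = { {0}, {0}, {1}, {1} };
	unsigned int tcbind[4] = {
		(0u << TCBIND_CURTC_SHIFT) | 0u,	/* TC0 on VPE0: boot TC        */
		(1u << TCBIND_CURTC_SHIFT) | 0u,	/* TC1 on VPE0: secondary      */
		(2u << TCBIND_CURTC_SHIFT) | 1u,	/* TC2 on VPE1: first on VPE1  */
		(3u << TCBIND_CURTC_SHIFT) | 1u,	/* TC3 on VPE1: secondary      */
	};

	/* cpu 0 is the boot TC; the predicate is only meaningful for cpu >= 1
	 * because it looks at the previously initialised cpu_data entry. */
	for (unsigned int cpu = 1; cpu < 4; cpu++)
		printf("cpu %u: %s\n", cpu,
		       is_secondary_tc(tcbind[cpu], cpu, cpu_data)
		       ? "secondary TC" : "first TC of its VPE");
	return 0;
}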
@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void)
 	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (!secondaryTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	/*
 	 * Interrupt handling.
 	 */
@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void)
 		} else
 			set_c0_cause(CAUSEF_IV);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 	TLBMISS_HANDLER_SETUP();
@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void)
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
 
-	cpu_cache_init();
-	tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (bootTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+		cpu_cache_init();
+		tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /* Install CPU exception handler */