diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r-- | arch/mips/kernel/traps.c | 144 |
1 file changed, 131 insertions, 13 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e0b499694d18..074e857ced28 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
11 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki | 11 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki |
12 | * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. | 12 | * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. |
13 | * Copyright (C) 2014, Imagination Technologies Ltd. | ||
13 | */ | 14 | */ |
14 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
15 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
@@ -47,6 +48,7 @@ | |||
47 | #include <asm/mipsregs.h> | 48 | #include <asm/mipsregs.h> |
48 | #include <asm/mipsmtregs.h> | 49 | #include <asm/mipsmtregs.h> |
49 | #include <asm/module.h> | 50 | #include <asm/module.h> |
51 | #include <asm/msa.h> | ||
50 | #include <asm/pgtable.h> | 52 | #include <asm/pgtable.h> |
51 | #include <asm/ptrace.h> | 53 | #include <asm/ptrace.h> |
52 | #include <asm/sections.h> | 54 | #include <asm/sections.h> |
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void); | |||
77 | extern asmlinkage void handle_cpu(void); | 79 | extern asmlinkage void handle_cpu(void); |
78 | extern asmlinkage void handle_ov(void); | 80 | extern asmlinkage void handle_ov(void); |
79 | extern asmlinkage void handle_tr(void); | 81 | extern asmlinkage void handle_tr(void); |
82 | extern asmlinkage void handle_msa_fpe(void); | ||
80 | extern asmlinkage void handle_fpe(void); | 83 | extern asmlinkage void handle_fpe(void); |
81 | extern asmlinkage void handle_ftlb(void); | 84 | extern asmlinkage void handle_ftlb(void); |
85 | extern asmlinkage void handle_msa(void); | ||
82 | extern asmlinkage void handle_mdmx(void); | 86 | extern asmlinkage void handle_mdmx(void); |
83 | extern asmlinkage void handle_watch(void); | 87 | extern asmlinkage void handle_watch(void); |
84 | extern asmlinkage void handle_mt(void); | 88 | extern asmlinkage void handle_mt(void); |
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
861 | enum ctx_state prev_state; | 865 | enum ctx_state prev_state; |
862 | unsigned long epc; | 866 | unsigned long epc; |
863 | u16 instr[2]; | 867 | u16 instr[2]; |
868 | mm_segment_t seg; | ||
869 | |||
870 | seg = get_fs(); | ||
871 | if (!user_mode(regs)) | ||
872 | set_fs(KERNEL_DS); | ||
864 | 873 | ||
865 | prev_state = exception_enter(); | 874 | prev_state = exception_enter(); |
866 | if (get_isa16_mode(regs->cp0_epc)) { | 875 | if (get_isa16_mode(regs->cp0_epc)) { |
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
870 | if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || | 879 | if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || |
871 | (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) | 880 | (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) |
872 | goto out_sigsegv; | 881 | goto out_sigsegv; |
873 | opcode = (instr[0] << 16) | instr[1]; | 882 | opcode = (instr[0] << 16) | instr[1]; |
874 | } else { | 883 | } else { |
875 | /* MIPS16e mode */ | 884 | /* MIPS16e mode */ |
876 | if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) | 885 | if (__get_user(instr[0], |
886 | (u16 __user *)msk_isa16_mode(epc))) | ||
877 | goto out_sigsegv; | 887 | goto out_sigsegv; |
878 | bcode = (instr[0] >> 6) & 0x3f; | 888 | bcode = (instr[0] >> 6) & 0x3f; |
879 | do_trap_or_bp(regs, bcode, "Break"); | 889 | do_trap_or_bp(regs, bcode, "Break"); |
880 | goto out; | 890 | goto out; |
881 | } | 891 | } |
882 | } else { | 892 | } else { |
883 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 893 | if (__get_user(opcode, |
894 | (unsigned int __user *) exception_epc(regs))) | ||
884 | goto out_sigsegv; | 895 | goto out_sigsegv; |
885 | } | 896 | } |
886 | 897 | ||
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
918 | do_trap_or_bp(regs, bcode, "Break"); | 929 | do_trap_or_bp(regs, bcode, "Break"); |
919 | 930 | ||
920 | out: | 931 | out: |
932 | set_fs(seg); | ||
921 | exception_exit(prev_state); | 933 | exception_exit(prev_state); |
922 | return; | 934 | return; |
923 | 935 | ||
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs) | |||
931 | u32 opcode, tcode = 0; | 943 | u32 opcode, tcode = 0; |
932 | enum ctx_state prev_state; | 944 | enum ctx_state prev_state; |
933 | u16 instr[2]; | 945 | u16 instr[2]; |
946 | mm_segment_t seg; | ||
934 | unsigned long epc = msk_isa16_mode(exception_epc(regs)); | 947 | unsigned long epc = msk_isa16_mode(exception_epc(regs)); |
935 | 948 | ||
949 | seg = get_fs(); | ||
950 | if (!user_mode(regs)) | ||
951 | set_fs(get_ds()); | ||
952 | |||
936 | prev_state = exception_enter(); | 953 | prev_state = exception_enter(); |
937 | if (get_isa16_mode(regs->cp0_epc)) { | 954 | if (get_isa16_mode(regs->cp0_epc)) { |
938 | if (__get_user(instr[0], (u16 __user *)(epc + 0)) || | 955 | if (__get_user(instr[0], (u16 __user *)(epc + 0)) || |
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs) | |||
953 | do_trap_or_bp(regs, tcode, "Trap"); | 970 | do_trap_or_bp(regs, tcode, "Trap"); |
954 | 971 | ||
955 | out: | 972 | out: |
973 | set_fs(seg); | ||
956 | exception_exit(prev_state); | 974 | exception_exit(prev_state); |
957 | return; | 975 | return; |
958 | 976 | ||
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
1074 | return NOTIFY_OK; | 1092 | return NOTIFY_OK; |
1075 | } | 1093 | } |
1076 | 1094 | ||
1095 | static int enable_restore_fp_context(int msa) | ||
1096 | { | ||
1097 | int err, was_fpu_owner; | ||
1098 | |||
1099 | if (!used_math()) { | ||
1100 | /* First time FP context user. */ | ||
1101 | err = init_fpu(); | ||
1102 | if (msa && !err) | ||
1103 | enable_msa(); | ||
1104 | if (!err) | ||
1105 | set_used_math(); | ||
1106 | return err; | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * This task has formerly used the FP context. | ||
1111 | * | ||
1112 | * If this thread has no live MSA vector context then we can simply | ||
1113 | * restore the scalar FP context. If it has live MSA vector context | ||
1114 | * (that is, it has or may have used MSA since last performing a | ||
1115 | * function call) then we'll need to restore the vector context. This | ||
1116 | * applies even if we're currently only executing a scalar FP | ||
1117 | * instruction. This is because if we were to later execute an MSA | ||
1118 | * instruction then we'd either have to: | ||
1119 | * | ||
1120 | * - Restore the vector context & clobber any registers modified by | ||
1121 | * scalar FP instructions between now & then. | ||
1122 | * | ||
1123 | * or | ||
1124 | * | ||
1125 | * - Not restore the vector context & lose the most significant bits | ||
1126 | * of all vector registers. | ||
1127 | * | ||
1128 | * Neither of those options is acceptable. We cannot restore the least | ||
1129 | * significant bits of the registers now & only restore the most | ||
1130 | * significant bits later because the most significant bits of any | ||
1131 | * vector registers whose aliased FP register is modified now will have | ||
1132 | * been zeroed. We'd have no way to know that when restoring the vector | ||
1133 | * context & thus may load an outdated value for the most significant | ||
1134 | * bits of a vector register. | ||
1135 | */ | ||
1136 | if (!msa && !thread_msa_context_live()) | ||
1137 | return own_fpu(1); | ||
1138 | |||
1139 | /* | ||
1140 | * This task is using or has previously used MSA. Thus we require | ||
1141 | * that Status.FR == 1. | ||
1142 | */ | ||
1143 | was_fpu_owner = is_fpu_owner(); | ||
1144 | err = own_fpu(0); | ||
1145 | if (err) | ||
1146 | return err; | ||
1147 | |||
1148 | enable_msa(); | ||
1149 | write_msa_csr(current->thread.fpu.msacsr); | ||
1150 | set_thread_flag(TIF_USEDMSA); | ||
1151 | |||
1152 | /* | ||
1153 | * If this is the first time that the task is using MSA and it has | ||
1154 | * previously used scalar FP in this time slice then we already have | ||
1155 | * FP context which we shouldn't clobber. | ||
1156 | */ | ||
1157 | if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner) | ||
1158 | return 0; | ||
1159 | |||
1160 | /* We need to restore the vector context. */ | ||
1161 | restore_msa(current); | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1077 | asmlinkage void do_cpu(struct pt_regs *regs) | 1165 | asmlinkage void do_cpu(struct pt_regs *regs) |
1078 | { | 1166 | { |
1079 | enum ctx_state prev_state; | 1167 | enum ctx_state prev_state; |
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
1153 | /* Fall through. */ | 1241 | /* Fall through. */ |
1154 | 1242 | ||
1155 | case 1: | 1243 | case 1: |
1156 | if (used_math()) /* Using the FPU again. */ | 1244 | err = enable_restore_fp_context(0); |
1157 | err = own_fpu(1); | ||
1158 | else { /* First time FPU user. */ | ||
1159 | err = init_fpu(); | ||
1160 | set_used_math(); | ||
1161 | } | ||
1162 | 1245 | ||
1163 | if (!raw_cpu_has_fpu || err) { | 1246 | if (!raw_cpu_has_fpu || err) { |
1164 | int sig; | 1247 | int sig; |
@@ -1183,6 +1266,37 @@ out: | |||
1183 | exception_exit(prev_state); | 1266 | exception_exit(prev_state); |
1184 | } | 1267 | } |
1185 | 1268 | ||
1269 | asmlinkage void do_msa_fpe(struct pt_regs *regs) | ||
1270 | { | ||
1271 | enum ctx_state prev_state; | ||
1272 | |||
1273 | prev_state = exception_enter(); | ||
1274 | die_if_kernel("do_msa_fpe invoked from kernel context!", regs); | ||
1275 | force_sig(SIGFPE, current); | ||
1276 | exception_exit(prev_state); | ||
1277 | } | ||
1278 | |||
1279 | asmlinkage void do_msa(struct pt_regs *regs) | ||
1280 | { | ||
1281 | enum ctx_state prev_state; | ||
1282 | int err; | ||
1283 | |||
1284 | prev_state = exception_enter(); | ||
1285 | |||
1286 | if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { | ||
1287 | force_sig(SIGILL, current); | ||
1288 | goto out; | ||
1289 | } | ||
1290 | |||
1291 | die_if_kernel("do_msa invoked from kernel context!", regs); | ||
1292 | |||
1293 | err = enable_restore_fp_context(1); | ||
1294 | if (err) | ||
1295 | force_sig(SIGILL, current); | ||
1296 | out: | ||
1297 | exception_exit(prev_state); | ||
1298 | } | ||
1299 | |||
1186 | asmlinkage void do_mdmx(struct pt_regs *regs) | 1300 | asmlinkage void do_mdmx(struct pt_regs *regs) |
1187 | { | 1301 | { |
1188 | enum ctx_state prev_state; | 1302 | enum ctx_state prev_state; |
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void) | |||
1337 | case CPU_34K: | 1451 | case CPU_34K: |
1338 | case CPU_74K: | 1452 | case CPU_74K: |
1339 | case CPU_1004K: | 1453 | case CPU_1004K: |
1454 | case CPU_1074K: | ||
1340 | case CPU_INTERAPTIV: | 1455 | case CPU_INTERAPTIV: |
1341 | case CPU_PROAPTIV: | 1456 | case CPU_PROAPTIV: |
1457 | case CPU_P5600: | ||
1342 | { | 1458 | { |
1343 | #define ERRCTL_PE 0x80000000 | 1459 | #define ERRCTL_PE 0x80000000 |
1344 | #define ERRCTL_L2P 0x00800000 | 1460 | #define ERRCTL_L2P 0x00800000 |
@@ -2017,6 +2133,7 @@ void __init trap_init(void) | |||
2017 | set_except_vector(11, handle_cpu); | 2133 | set_except_vector(11, handle_cpu); |
2018 | set_except_vector(12, handle_ov); | 2134 | set_except_vector(12, handle_ov); |
2019 | set_except_vector(13, handle_tr); | 2135 | set_except_vector(13, handle_tr); |
2136 | set_except_vector(14, handle_msa_fpe); | ||
2020 | 2137 | ||
2021 | if (current_cpu_type() == CPU_R6000 || | 2138 | if (current_cpu_type() == CPU_R6000 || |
2022 | current_cpu_type() == CPU_R6000A) { | 2139 | current_cpu_type() == CPU_R6000A) { |
@@ -2040,6 +2157,7 @@ void __init trap_init(void) | |||
2040 | set_except_vector(15, handle_fpe); | 2157 | set_except_vector(15, handle_fpe); |
2041 | 2158 | ||
2042 | set_except_vector(16, handle_ftlb); | 2159 | set_except_vector(16, handle_ftlb); |
2160 | set_except_vector(21, handle_msa); | ||
2043 | set_except_vector(22, handle_mdmx); | 2161 | set_except_vector(22, handle_mdmx); |
2044 | 2162 | ||
2045 | if (cpu_has_mcheck) | 2163 | if (cpu_has_mcheck) |