diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r-- | arch/mips/kernel/traps.c | 61 |
1 file changed, 52 insertions(+), 9 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 51706d6dd5b0..22b19c275044 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -90,6 +90,7 @@ extern asmlinkage void handle_mt(void); | |||
90 | extern asmlinkage void handle_dsp(void); | 90 | extern asmlinkage void handle_dsp(void); |
91 | extern asmlinkage void handle_mcheck(void); | 91 | extern asmlinkage void handle_mcheck(void); |
92 | extern asmlinkage void handle_reserved(void); | 92 | extern asmlinkage void handle_reserved(void); |
93 | extern void tlb_do_page_fault_0(void); | ||
93 | 94 | ||
94 | void (*board_be_init)(void); | 95 | void (*board_be_init)(void); |
95 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 96 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
@@ -1088,13 +1089,19 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
1088 | 1089 | ||
1089 | static int enable_restore_fp_context(int msa) | 1090 | static int enable_restore_fp_context(int msa) |
1090 | { | 1091 | { |
1091 | int err, was_fpu_owner; | 1092 | int err, was_fpu_owner, prior_msa; |
1092 | 1093 | ||
1093 | if (!used_math()) { | 1094 | if (!used_math()) { |
1094 | /* First time FP context user. */ | 1095 | /* First time FP context user. */ |
1096 | preempt_disable(); | ||
1095 | err = init_fpu(); | 1097 | err = init_fpu(); |
1096 | if (msa && !err) | 1098 | if (msa && !err) { |
1097 | enable_msa(); | 1099 | enable_msa(); |
1100 | _init_msa_upper(); | ||
1101 | set_thread_flag(TIF_USEDMSA); | ||
1102 | set_thread_flag(TIF_MSA_CTX_LIVE); | ||
1103 | } | ||
1104 | preempt_enable(); | ||
1098 | if (!err) | 1105 | if (!err) |
1099 | set_used_math(); | 1106 | set_used_math(); |
1100 | return err; | 1107 | return err; |
@@ -1134,10 +1141,11 @@ static int enable_restore_fp_context(int msa) | |||
1134 | * This task is using or has previously used MSA. Thus we require | 1141 | * This task is using or has previously used MSA. Thus we require |
1135 | * that Status.FR == 1. | 1142 | * that Status.FR == 1. |
1136 | */ | 1143 | */ |
1144 | preempt_disable(); | ||
1137 | was_fpu_owner = is_fpu_owner(); | 1145 | was_fpu_owner = is_fpu_owner(); |
1138 | err = own_fpu(0); | 1146 | err = own_fpu_inatomic(0); |
1139 | if (err) | 1147 | if (err) |
1140 | return err; | 1148 | goto out; |
1141 | 1149 | ||
1142 | enable_msa(); | 1150 | enable_msa(); |
1143 | write_msa_csr(current->thread.fpu.msacsr); | 1151 | write_msa_csr(current->thread.fpu.msacsr); |
@@ -1146,13 +1154,42 @@ static int enable_restore_fp_context(int msa) | |||
1146 | /* | 1154 | /* |
1147 | * If this is the first time that the task is using MSA and it has | 1155 | * If this is the first time that the task is using MSA and it has |
1148 | * previously used scalar FP in this time slice then we already have | 1156 | * previously used scalar FP in this time slice then we already have |
1149 | * FP context which we shouldn't clobber. | 1157 | * FP context which we shouldn't clobber. We do however need to clear |
1158 | * the upper 64b of each vector register so that this task has no | ||
1159 | * opportunity to see data left behind by another. | ||
1150 | */ | 1160 | */ |
1151 | if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner) | 1161 | prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); |
1152 | return 0; | 1162 | if (!prior_msa && was_fpu_owner) { |
1163 | _init_msa_upper(); | ||
1164 | |||
1165 | goto out; | ||
1166 | } | ||
1167 | |||
1168 | if (!prior_msa) { | ||
1169 | /* | ||
1170 | * Restore the least significant 64b of each vector register | ||
1171 | * from the existing scalar FP context. | ||
1172 | */ | ||
1173 | _restore_fp(current); | ||
1174 | |||
1175 | /* | ||
1176 | * The task has not formerly used MSA, so clear the upper 64b | ||
1177 | * of each vector register such that it cannot see data left | ||
1178 | * behind by another task. | ||
1179 | */ | ||
1180 | _init_msa_upper(); | ||
1181 | } else { | ||
1182 | /* We need to restore the vector context. */ | ||
1183 | restore_msa(current); | ||
1184 | |||
1185 | /* Restore the scalar FP control & status register */ | ||
1186 | if (!was_fpu_owner) | ||
1187 | asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); | ||
1188 | } | ||
1189 | |||
1190 | out: | ||
1191 | preempt_enable(); | ||
1153 | 1192 | ||
1154 | /* We need to restore the vector context. */ | ||
1155 | restore_msa(current); | ||
1156 | return 0; | 1193 | return 0; |
1157 | } | 1194 | } |
1158 | 1195 | ||
@@ -2114,6 +2151,12 @@ void __init trap_init(void) | |||
2114 | set_except_vector(15, handle_fpe); | 2151 | set_except_vector(15, handle_fpe); |
2115 | 2152 | ||
2116 | set_except_vector(16, handle_ftlb); | 2153 | set_except_vector(16, handle_ftlb); |
2154 | |||
2155 | if (cpu_has_rixiex) { | ||
2156 | set_except_vector(19, tlb_do_page_fault_0); | ||
2157 | set_except_vector(20, tlb_do_page_fault_0); | ||
2158 | } | ||
2159 | |||
2117 | set_except_vector(21, handle_msa); | 2160 | set_except_vector(21, handle_msa); |
2118 | set_except_vector(22, handle_mdmx); | 2161 | set_except_vector(22, handle_mdmx); |
2119 | 2162 | ||