aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/kernel/unaligned.c
diff options
context:
space:
mode:
authorLeonid Yegoshin <Leonid.Yegoshin@imgtec.com>2015-06-22 07:21:00 -0400
committerRalf Baechle <ralf@linux-mips.org>2015-09-03 06:07:40 -0400
commite4aa1f153add29343eeb8b3bf0f64e7c6fc7e697 (patch)
treec934042d376e837949813f50d4f6dce576fb59eb /arch/mips/kernel/unaligned.c
parent6b35e11442db48638c9e9f2ff19f706484a73abe (diff)
MIPS: MSA unaligned memory access support
The MSA architecture specification allows for hardware to not implement unaligned vector memory accesses in some or all cases. A typical example of this is the I6400 core which does not implement unaligned vector memory access when the memory crosses a page boundary. The architecture also requires that such memory accesses complete successfully as far as userland is concerned, so the kernel is required to emulate them. This patch implements support for emulating unaligned MSA ld & st instructions by copying between the user memory & the task's FP context in struct thread_struct, updating hardware registers from there as appropriate in order to avoid saving & restoring the entire vector context for each unaligned memory access. Tested both using an I6400 CPU and with a QEMU build hacked to produce AdEL exceptions for unaligned vector memory accesses. [paul.burton@imgtec.com: - Remove #ifdef's - Move msa_op into enum major_op rather than #define - Replace msa_{to,from}_wd with {read,write}_msa_wr_{b,h,w,l} and the format-agnostic wrappers, removing the custom endian mangling for big endian systems. - Restructure the msa_op case in emulate_load_store_insn to share more code between the load & store cases. - Avoid the need for a temporary union fpureg on the stack by simply reusing the already suitably aligned context in struct thread_struct. - Use sizeof(*fpr) rather than hardcoding 16 as the size for user memory checks & copies. - Stop recalculating the address of the unaligned vector memory access and rely upon the value read from BadVAddr as we do for other unaligned memory access instructions. - Drop the now unused val8 & val16 fields in union fpureg. - Rewrite commit message. - General formatting cleanups.] Signed-off-by: Paul Burton <paul.burton@imgtec.com> Cc: linux-mips@linux-mips.org Cc: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> Cc: Huacai Chen <chenhc@lemote.com> Cc: Maciej W. 
Rozycki <macro@linux-mips.org> Cc: linux-kernel@vger.kernel.org Cc: Jie Chen <chenj@lemote.com> Cc: Markos Chandras <markos.chandras@imgtec.com> Patchwork: https://patchwork.linux-mips.org/patch/10573/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/unaligned.c')
-rw-r--r--arch/mips/kernel/unaligned.c72
1 file changed, 72 insertions, 0 deletions
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index eb3efd137fd1..f55869c00ce4 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -891,6 +891,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
891#ifdef CONFIG_EVA 891#ifdef CONFIG_EVA
892 mm_segment_t seg; 892 mm_segment_t seg;
893#endif 893#endif
894 union fpureg *fpr;
895 enum msa_2b_fmt df;
896 unsigned int wd;
894 origpc = (unsigned long)pc; 897 origpc = (unsigned long)pc;
895 orig31 = regs->regs[31]; 898 orig31 = regs->regs[31];
896 899
@@ -1202,6 +1205,75 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1202 break; 1205 break;
1203 return; 1206 return;
1204 1207
1208 case msa_op:
1209 if (!cpu_has_msa)
1210 goto sigill;
1211
1212 /*
1213 * If we've reached this point then userland should have taken
1214 * the MSA disabled exception & initialised vector context at
1215 * some point in the past.
1216 */
1217 BUG_ON(!thread_msa_context_live());
1218
1219 df = insn.msa_mi10_format.df;
1220 wd = insn.msa_mi10_format.wd;
1221 fpr = &current->thread.fpu.fpr[wd];
1222
1223 switch (insn.msa_mi10_format.func) {
1224 case msa_ld_op:
1225 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1226 goto sigbus;
1227
1228 /*
1229 * Disable preemption to avoid a race between copying
1230 * state from userland, migrating to another CPU and
1231 * updating the hardware vector register below.
1232 */
1233 preempt_disable();
1234
1235 res = __copy_from_user_inatomic(fpr, addr,
1236 sizeof(*fpr));
1237 if (res)
1238 goto fault;
1239
1240 /*
1241 * Update the hardware register if it is in use by the
1242 * task in this quantum, in order to avoid having to
1243 * save & restore the whole vector context.
1244 */
1245 if (test_thread_flag(TIF_USEDMSA))
1246 write_msa_wr(wd, fpr, df);
1247
1248 preempt_enable();
1249 break;
1250
1251 case msa_st_op:
1252 if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
1253 goto sigbus;
1254
1255 /*
1256 * Update from the hardware register if it is in use by
1257 * the task in this quantum, in order to avoid having to
1258 * save & restore the whole vector context.
1259 */
1260 preempt_disable();
1261 if (test_thread_flag(TIF_USEDMSA))
1262 read_msa_wr(wd, fpr, df);
1263 preempt_enable();
1264
1265 res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
1266 if (res)
1267 goto fault;
1268 break;
1269
1270 default:
1271 goto sigbus;
1272 }
1273
1274 compute_return_epc(regs);
1275 break;
1276
1205#ifndef CONFIG_CPU_MIPSR6 1277#ifndef CONFIG_CPU_MIPSR6
1206 /* 1278 /*
1207 * COP2 is available to implementor for application specific use. 1279 * COP2 is available to implementor for application specific use.