author	Paul Mackerras <paulus@samba.org>	2014-01-12 23:56:30 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-01-14 21:59:14 -0500
commit	3ac8ff1c475bda7174fce63230c0932454287cd5 (patch)
tree	080d06818982dfad75939133faa4ac34be6f348d
parent	d31626f70b6103f4d9153b75d07e0e8795728cc9 (diff)
powerpc: Fix transactional FP/VMX/VSX unavailable handlers
Currently, if a process starts a transaction and then takes an exception
because the FPU, VMX or VSX unit is unavailable to it, we end up
corrupting any FP/VMX/VSX state that was valid before the interrupt.
For example, if the process starts a transaction with the FPU available
to it but VMX unavailable, and then does a VMX instruction inside the
transaction, the FP state gets corrupted.

Loading up the desired state generally involves doing a reclaim and a
recheckpoint.  To avoid corrupting already-valid state, we have to be
careful not to reload that state from the thread_struct between the
reclaim and the recheckpoint (since the thread_struct values are stale
by now), and we have to reload that state from the transact_fp/vr
arrays after the recheckpoint to get back the current transactional
values saved there by the reclaim.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/kernel/traps.c	45
1 file changed, 36 insertions(+), 9 deletions(-)
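To make the failure mode concrete, here is a minimal user-space sketch of the sequence the commit message describes. This is not kernel code: reclaim(), recheckpoint(), live_fp, thread_fp and transact_fp are hypothetical stand-ins for tm_reclaim(), tm_recheckpoint(), the live register file, the thread_struct copy and the transact_fp save area, with each register file reduced to a single double.

/*
 * Hypothetical model of the bug, one "register" per facility.
 * reclaim() plays the role of tm_reclaim(), recheckpoint() the
 * role of tm_recheckpoint().
 */
#include <stdio.h>

#define MSR_FP	0x1
#define MSR_VEC	0x2

static double live_fp;		/* the live FP register file */
static double thread_fp;	/* checkpointed state in thread_struct */
static double transact_fp;	/* the transact_fp save area */

static void reclaim(void)
{
	/* tm_reclaim(): live transactional values -> transact arrays */
	transact_fp = live_fp;
}

static void recheckpoint(unsigned long msr)
{
	/* tm_recheckpoint(): reload named facilities from thread_struct */
	if (msr & MSR_FP)
		live_fp = thread_fp;
}

int main(void)
{
	unsigned long msr = MSR_FP;	/* FP live, VMX unavailable */

	thread_fp = 1.0;		/* value checkpointed at transaction start */
	live_fp = 2.0;			/* value computed inside the transaction */

	/* A VMX instruction inside the transaction now raises a
	 * VMX-unavailable exception; the handler reclaims first. */
	reclaim();			/* transact_fp = 2.0 */

	/* Old code: recheckpoint(msr | MSR_VEC) would also reload FP
	 * from thread_fp, clobbering live_fp with the stale 1.0. */
	recheckpoint(MSR_VEC);		/* fixed: only the missing facility */
	if (msr & MSR_FP)
		live_fp = transact_fp;	/* restore 2.0, as the patch does */

	printf("live_fp = %g (2 = fixed, 1 = corrupted)\n", live_fp);
	return 0;
}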
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 26e1358ff0bc..33cd7a0b8e73 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1416,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	/* This loads and recheckpoints the FP registers from
 	 * thread.fpr[]. They will remain in registers after the
 	 * checkpoint so we don't need to reload them after.
+	 * If VMX is in use, the VRs now hold checkpointed values,
+	 * so we don't want to load the VRs from the thread_struct.
 	 */
-	tm_recheckpoint(&current->thread, regs->msr);
+	tm_recheckpoint(&current->thread, MSR_FP);
+
+	/* If VMX is in use, get the transactional values back */
+	if (regs->msr & MSR_VEC) {
+		do_load_up_transact_altivec(&current->thread);
+		/* At this point all the VSX state is loaded, so enable it */
+		regs->msr |= MSR_VSX;
+	}
 }
 
-#ifdef CONFIG_ALTIVEC
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
 	/* See the comments in fp_unavailable_tm(). This function operates
@@ -1432,14 +1440,19 @@ void altivec_unavailable_tm(struct pt_regs *regs)
 		 regs->nip, regs->msr);
 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 	regs->msr |= MSR_VEC;
-	tm_recheckpoint(&current->thread, regs->msr);
+	tm_recheckpoint(&current->thread, MSR_VEC);
 	current->thread.used_vr = 1;
+
+	if (regs->msr & MSR_FP) {
+		do_load_up_transact_fpu(&current->thread);
+		regs->msr |= MSR_VSX;
+	}
 }
-#endif
 
-#ifdef CONFIG_VSX
 void vsx_unavailable_tm(struct pt_regs *regs)
 {
+	unsigned long orig_msr = regs->msr;
+
 	/* See the comments in fp_unavailable_tm(). This works similarly,
 	 * though we're loading both FP and VEC registers in here.
 	 *
@@ -1451,16 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 		 "MSR=%lx\n",
 		 regs->nip, regs->msr);
 
+	current->thread.used_vsr = 1;
+
+	/* If FP and VMX are already loaded, we have all the state we need */
+	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+		regs->msr |= MSR_VSX;
+		return;
+	}
+
 	/* This reclaims FP and/or VR regs if they're already enabled */
 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 
 	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
 		MSR_VSX;
-	/* This loads & recheckpoints FP and VRs. */
-	tm_recheckpoint(&current->thread, regs->msr);
-	current->thread.used_vsr = 1;
+
+	/* This loads & recheckpoints FP and VRs; but we have
+	 * to be sure not to overwrite previously-valid state.
+	 */
+	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+	if (orig_msr & MSR_FP)
+		do_load_up_transact_fpu(&current->thread);
+	if (orig_msr & MSR_VEC)
+		do_load_up_transact_altivec(&current->thread);
 }
-#endif
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 void performance_monitor_exception(struct pt_regs *regs)
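The key line in the VSX path is tm_recheckpoint(&current->thread, regs->msr & ~orig_msr): only facilities that were not already live at exception entry get reloaded from the thread_struct, and the live ones are refilled from the transact arrays afterwards. A quick sketch of the mask arithmetic, using MSR bit values that match the usual powerpc definitions but are only illustrative here:

#include <assert.h>

#define MSR_FP	0x0000000000002000UL	/* floating point available */
#define MSR_VEC	0x0000000002000000UL	/* AltiVec available */

int main(void)
{
	unsigned long orig_msr = MSR_FP;	/* FP was live on entry */
	unsigned long msr = orig_msr | MSR_VEC;	/* handler enables both */

	/* regs->msr & ~orig_msr: recheckpoint loads VMX from the
	 * thread_struct but leaves the still-valid FP registers alone */
	assert((msr & ~orig_msr) == MSR_VEC);
	return 0;
}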