aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCyril Bur <cyrilbur@gmail.com>2016-09-23 02:18:08 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2016-10-04 01:43:05 -0400
commitdc16b553c949e81f37555777dc7bab66d78285a7 (patch)
tree232193ebddf36323a7611d5118e5bdad540edde5
parent0e7736c6b806b24c693367196a076c78328ed742 (diff)
powerpc: Always restore FPU/VEC/VSX if hardware transactional memory in use
Comment from arch/powerpc/kernel/process.c:967: If userspace is inside a transaction (whether active or suspended) and FP/VMX/VSX instructions have ever been enabled inside that transaction, then we have to keep them enabled and keep the FP/VMX/VSX state loaded while ever the transaction continues. The reason is that if we didn't, and subsequently got a FP/VMX/VSX unavailable interrupt inside a transaction, we don't know whether it's the same transaction, and thus we don't know which of the checkpointed state and the transactional state to use. restore_math() restore_fp() and restore_altivec() currently may not restore the registers. It doesn't appear that this is more serious than a performance penalty. If the math registers aren't restored the userspace thread will still be run with the facility disabled. Userspace will not be able to read invalid values. On the first access it will take a facility unavailable exception and the kernel will detect an active transaction, at which point it will abort the transaction. There is the possibility for a pathological case preventing any progress by transactions, however, transactions are never guaranteed to make progress. Fixes: 70fe3d9 ("powerpc: Restore FPU/VEC/VSX if previously used") Signed-off-by: Cyril Bur <cyrilbur@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/kernel/process.c21
1 files changed, 18 insertions, 3 deletions
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce8a26a0c947..3846fab5d1ce 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -89,7 +89,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
89 set_thread_flag(TIF_RESTORE_TM); 89 set_thread_flag(TIF_RESTORE_TM);
90 } 90 }
91} 91}
92
93static inline bool msr_tm_active(unsigned long msr)
94{
95 return MSR_TM_ACTIVE(msr);
96}
92#else 97#else
98static inline bool msr_tm_active(unsigned long msr) { return false; }
93static inline void check_if_tm_restore_required(struct task_struct *tsk) { } 99static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
94#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 100#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
95 101
@@ -209,7 +215,7 @@ void enable_kernel_fp(void)
209EXPORT_SYMBOL(enable_kernel_fp); 215EXPORT_SYMBOL(enable_kernel_fp);
210 216
211static int restore_fp(struct task_struct *tsk) { 217static int restore_fp(struct task_struct *tsk) {
212 if (tsk->thread.load_fp) { 218 if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
213 load_fp_state(&current->thread.fp_state); 219 load_fp_state(&current->thread.fp_state);
214 current->thread.load_fp++; 220 current->thread.load_fp++;
215 return 1; 221 return 1;
@@ -279,7 +285,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
279 285
280static int restore_altivec(struct task_struct *tsk) 286static int restore_altivec(struct task_struct *tsk)
281{ 287{
282 if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) { 288 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
289 (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
283 load_vr_state(&tsk->thread.vr_state); 290 load_vr_state(&tsk->thread.vr_state);
284 tsk->thread.used_vr = 1; 291 tsk->thread.used_vr = 1;
285 tsk->thread.load_vec++; 292 tsk->thread.load_vec++;
@@ -465,7 +472,8 @@ void restore_math(struct pt_regs *regs)
465{ 472{
466 unsigned long msr; 473 unsigned long msr;
467 474
468 if (!current->thread.load_fp && !loadvec(current->thread)) 475 if (!msr_tm_active(regs->msr) &&
476 !current->thread.load_fp && !loadvec(current->thread))
469 return; 477 return;
470 478
471 msr = regs->msr; 479 msr = regs->msr;
@@ -984,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs)
984 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; 992 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
985 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; 993 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
986 994
995 /* Ensure that restore_math() will restore */
996 if (msr_diff & MSR_FP)
997 current->thread.load_fp = 1;
 998#ifdef CONFIG_ALTIVEC
999 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1000 current->thread.load_vec = 1;
1001#endif
987 restore_math(regs); 1002 restore_math(regs);
988 1003
989 regs->msr |= msr_diff; 1004 regs->msr |= msr_diff;