diff options
author | Simon Guo <wei.guo.simon@gmail.com> | 2018-05-23 03:01:59 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@ozlabs.org> | 2018-05-31 20:30:00 -0400 |
commit | 13989b65ebb74c05c577dbbcc111e1fdd7da763a (patch) | |
tree | 5430a76d199a1bde726f9de6849a1000e88474b9 | |
parent | 8d2e2fc5e082a7b3f858cefb6e65700f28d2955e (diff) |
KVM: PPC: Book3S PR: Add math support for PR KVM HTM
The math registers will be saved into vcpu->arch.fp/vr and corresponding
vcpu->arch.fp_tm/vr_tm area.
We flush or give up the math registers into vcpu->arch.fp/vr before saving the
transaction. After the transaction is restored, the math registers are loaded
back into the hardware registers.
If there is an FP/VEC/VSX unavailable exception while a transaction is active,
the math checkpoint content might be incorrect, and we would need to perform a
treclaim / load-correct-checkpoint-values / trechkpt sequence to retry the
transaction. That would make our solution complicated. To solve this issue,
we always keep the hardware guest MSR math bits (shadow_msr) consistent
with the MSR value the guest sees (kvmppc_get_msr()) whenever the guest MSR
has TM enabled. Then any FP/VEC/VSX unavailable exception can be delivered
to the guest, and the guest handles the exception by itself.
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 35 |
1 files changed, 35 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index a14721f034fb..dcb577fde9cd 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -308,6 +308,28 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) | |||
308 | tm_disable(); | 308 | tm_disable(); |
309 | } | 309 | } |
310 | 310 | ||
/*
 * Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest-visible MSR (kvmppc_get_msr()) but not yet enabled in hardware
 * (i.e. not yet recorded in vcpu->arch.guest_owned_ext).
 *
 * Called after a TM state restore / guest MSR update so that the hardware
 * math-enable bits stay consistent with what the guest sees, allowing
 * FP/VEC/VSX unavailable exceptions to be delivered directly to the guest.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	/* Math bits the guest has enabled but hardware has not yet loaded. */
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	/* Nothing missing — hardware already matches the guest MSR. */
	if (!ext_diff)
		return;

	/*
	 * Pick the unavailable-interrupt number matching the single missing
	 * facility; any combination of more than one missing bit falls
	 * through to the VSX case (presumably kvmppc_handle_ext() then loads
	 * all requested facilities — confirm against its definition).
	 */
	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}
332 | |||
311 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) | 333 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) |
312 | { | 334 | { |
313 | if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { | 335 | if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { |
@@ -315,6 +337,8 @@ void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) | |||
315 | return; | 337 | return; |
316 | } | 338 | } |
317 | 339 | ||
340 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
341 | |||
318 | preempt_disable(); | 342 | preempt_disable(); |
319 | _kvmppc_save_tm_pr(vcpu, mfmsr()); | 343 | _kvmppc_save_tm_pr(vcpu, mfmsr()); |
320 | preempt_enable(); | 344 | preempt_enable(); |
@@ -324,12 +348,18 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) | |||
324 | { | 348 | { |
325 | if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { | 349 | if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { |
326 | kvmppc_restore_tm_sprs(vcpu); | 350 | kvmppc_restore_tm_sprs(vcpu); |
351 | if (kvmppc_get_msr(vcpu) & MSR_TM) | ||
352 | kvmppc_handle_lost_math_exts(vcpu); | ||
327 | return; | 353 | return; |
328 | } | 354 | } |
329 | 355 | ||
330 | preempt_disable(); | 356 | preempt_disable(); |
331 | _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); | 357 | _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); |
332 | preempt_enable(); | 358 | preempt_enable(); |
359 | |||
360 | if (kvmppc_get_msr(vcpu) & MSR_TM) | ||
361 | kvmppc_handle_lost_math_exts(vcpu); | ||
362 | |||
333 | } | 363 | } |
334 | #endif | 364 | #endif |
335 | 365 | ||
@@ -468,6 +498,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
468 | /* Preload FPU if it's enabled */ | 498 | /* Preload FPU if it's enabled */ |
469 | if (kvmppc_get_msr(vcpu) & MSR_FP) | 499 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
470 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 500 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
501 | |||
502 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
503 | if (kvmppc_get_msr(vcpu) & MSR_TM) | ||
504 | kvmppc_handle_lost_math_exts(vcpu); | ||
505 | #endif | ||
471 | } | 506 | } |
472 | 507 | ||
473 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) | 508 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |