Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	51
1 file changed, 51 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks. So we don't let the
+		 * timebase offset be changed on P9 DD1. (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
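
Note on the hunk above: KVM_REG_PPC_TB_OFFSET is written from userspace through the standard KVM one-reg interface. The following is a minimal sketch, not part of this patch; vcpu_fd and the surrounding VMM setup are assumed, and on POWER9 DD1 the new check simply breaks out of the switch, so the write leaves tb_offset at its initial value of zero.

/* Sketch: setting the guest timebase offset via KVM_SET_ONE_REG.
 * Assumes an already-created vcpu_fd; error handling is minimal. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_tb_offset(int vcpu_fd, uint64_t tb_offset)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TB_OFFSET,
		.addr = (uint64_t)(uintptr_t)&tb_offset,
	};

	/* On POWER9 DD1 the kernel hunk above makes this a no-op:
	 * the vcore's tb_offset stays zero. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
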
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
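
Note on the TM check above: when a live (transactional or suspended) HTM transaction is found at entry, the function now fails with KVM_EXIT_FAIL_ENTRY instead of entering the guest. A hypothetical VMM-side sketch of how that surfaces after KVM_RUN follows; vcpu_fd and the mmap'ed struct kvm_run area are assumed to be set up already, and this is only an illustration of the uAPI, not code from this patch.

/* Sketch: reporting a refused guest entry to the VMM operator. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_FAIL_ENTRY) {
		/* Taken when entry was refused, e.g. the check above found
		 * a suspended or transactional HTM transaction. */
		fprintf(stderr, "KVM: vcpu entry failed, hw reason %llu\n",
			(unsigned long long)
			run->fail_entry.hardware_entry_failure_reason);
	}
	return ret;
}
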
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
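
Note on the save above (and the matching restore in the next hunk): each SPR access is a single mfspr/mtspr instruction with the SPR number encoded as an immediate, which is why the userspace-visible EBB/TAR/VRSAVE state can be bracketed around guest execution this cheaply. A freestanding sketch of that pattern follows; it is not the kernel's reg.h macros, it only builds on powerpc64, and the SPR_VRSAVE constant (256, the architected VRSAVE number) is stated here as an assumption.

/* Sketch: single-instruction SPR save/restore, illustrated with VRSAVE. */
#define SPR_VRSAVE	256	/* assumed architected SPR number for VRSAVE */

static inline unsigned long read_vrsave(void)
{
	unsigned long val;

	/* SPR number is an immediate operand of the instruction. */
	asm volatile("mfspr %0, %1" : "=r" (val) : "i" (SPR_VRSAVE));
	return val;
}

static inline void write_vrsave(unsigned long val)
{
	asm volatile("mtspr %0, %1" : : "i" (SPR_VRSAVE), "r" (val));
}
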
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);