 arch/powerpc/kvm/book3s.c | 27 +++++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 58f5200fc09b..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -94,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	/* Guest MSR values */
+	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+				 MSR_BE | MSR_DE;
+	/* Process MSR values */
+	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+				 MSR_EE;
+	/* External providers the guest reserved */
+	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	/* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.msr;
@@ -101,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+
 	msr &= to_book3s(vcpu)->msr_mask;
 	vcpu->arch.msr = msr;
-	vcpu->arch.shadow_msr = msr | MSR_USER32;
-	vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE |
-				  MSR_DE | MSR_FE1);
-	vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext);
+	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & (MSR_WE|MSR_POW)) {
 		if (!vcpu->arch.pending_exceptions) {
@@ -610,7 +625,7 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 
 	vcpu->arch.guest_owned_ext &= ~msr;
 	current->thread.regs->msr &= ~msr;
-	kvmppc_set_msr(vcpu, vcpu->arch.msr);
+	kvmppc_recalc_shadow_msr(vcpu);
 }
 
 /* Handle external providers (FPU, Altivec, VSX) */
@@ -664,7 +679,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 
 	vcpu->arch.guest_owned_ext |= msr;
 
-	kvmppc_set_msr(vcpu, vcpu->arch.msr);
+	kvmppc_recalc_shadow_msr(vcpu);
 
 	return RESUME_GUEST;
 }
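For readers tracing the change: the patch factors the shadow-MSR derivation out of kvmppc_set_msr() into a dedicated helper, kvmppc_recalc_shadow_msr(), so that kvmppc_giveup_ext() and kvmppc_handle_ext() can refresh the shadow MSR without re-running the full MSR write path. Below is a minimal standalone sketch of the mask/force/merge pattern the helper applies; the X_MSR_* bit positions and the pared-down vcpu struct are invented for illustration only (the real definitions live in the kernel headers), and this is not the kernel code itself.

/* toy_shadow_msr.c -- standalone illustration of the mask/force/merge
 * pattern used by kvmppc_recalc_shadow_msr() in the diff above.
 * All bit positions and the struct layout are simplified stand-ins,
 * NOT the kernel's definitions. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit assignments, for demonstration only. */
#define X_MSR_EE  (1ULL << 15)  /* external interrupts enabled */
#define X_MSR_PR  (1ULL << 14)  /* problem (user) state */
#define X_MSR_FP  (1ULL << 13)  /* FPU available (an "ext provider") */
#define X_MSR_SE  (1ULL << 10)  /* single-step trace */

struct toy_vcpu {
	uint64_t msr;             /* MSR the guest believes it has */
	uint64_t guest_owned_ext; /* ext units (e.g. FPU) the guest owns now */
	uint64_t shadow_msr;      /* MSR the guest actually runs under */
};

static void toy_recalc_shadow_msr(struct toy_vcpu *v)
{
	/* Keep only the guest-controllable bits from the guest MSR... */
	v->shadow_msr = v->msr & X_MSR_SE;
	/* ...force on the bits the host always requires... */
	v->shadow_msr |= X_MSR_EE | X_MSR_PR;
	/* ...and pass through ext-unit bits the guest currently owns. */
	v->shadow_msr |= v->msr & v->guest_owned_ext;
}

int main(void)
{
	struct toy_vcpu v = { .msr = X_MSR_FP | X_MSR_SE,
			      .guest_owned_ext = X_MSR_FP };
	toy_recalc_shadow_msr(&v);
	printf("shadow_msr = 0x%llx\n", (unsigned long long)v.shadow_msr);
	return 0;
}

With these toy values the program prints shadow_msr = 0xe400: SE is kept from the guest MSR, EE and PR are forced on by the host, and FP is merged in because the guest currently owns the FPU.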