author		Alexander Graf <agraf@suse.de>		2010-01-15 08:49:14 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:56 -0500
commit		a76f8497fd475028869f4b45087d80df14d74a50 (patch)
tree		02db4225c8ff20e42973a4551569f7b6bbfd0ead /arch/powerpc/kvm
parent		f7adbba1e5d464b0d449adac1eb2519be6be9728 (diff)
KVM: PPC: Move Shadow MSR calculation to function
We keep a copy of the MSR around that we use when we go into the guest context.

That copy is basically the normal process MSR flags OR'ed with some allowed guest-specified MSR flags. We also AND the external providers into this, so we get traps on FPU usage when we haven't activated it on the host yet.

Currently this calculation is part of the set_msr function that we use whenever we set the guest MSR value. With the external providers, we also have the case that we don't modify the guest's MSR, but only want to update the shadow MSR.

So let's move the shadow MSR parts to a separate function that we then use whenever we only need to update it. That way we don't accidentally call kvm_vcpu_block within a preempt notifier context.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
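For illustration only, not part of the patch: a minimal standalone sketch of the masking scheme described above, using invented single-bit flag values rather than the real PowerPC MSR bit layout. The split into guest-controllable bits, host-forced bits, and owned external-provider bits mirrors the kvmppc_recalc_shadow_msr() added below.

#include <stdint.h>
#include <stdio.h>

/* Invented single-bit flags for the sketch; real MSR bit positions differ. */
#define MSR_FE0	(1u << 0)	/* guest-controllable: FP exception mode 0 */
#define MSR_SE	(1u << 1)	/* guest-controllable: single step */
#define MSR_EE	(1u << 2)	/* host-forced: external interrupts enabled */
#define MSR_PR	(1u << 3)	/* host-forced: problem (user) state */
#define MSR_FP	(1u << 4)	/* external provider: FPU available */

static uint64_t recalc_shadow_msr(uint64_t guest_msr, uint64_t guest_owned_ext)
{
	/* Keep only the bits the guest may control directly... */
	uint64_t shadow = guest_msr & (MSR_FE0 | MSR_SE);
	/* ...force in the bits the host always needs while in guest context... */
	shadow |= MSR_EE | MSR_PR;
	/* ...and pass a facility bit through only once the host owns it,
	 * so an unowned facility still traps on first use. */
	shadow |= guest_msr & guest_owned_ext;
	return shadow;
}

int main(void)
{
	/* Guest wants FP, but the host hasn't activated the FPU yet: */
	uint64_t s = recalc_shadow_msr(MSR_FP | MSR_SE, 0);
	printf("shadow = %#llx (FP masked, first use traps)\n",
	       (unsigned long long)s);

	/* After the FPU has been marked guest-owned (cf. kvmppc_handle_ext): */
	s = recalc_shadow_msr(MSR_FP | MSR_SE, MSR_FP);
	printf("shadow = %#llx (FP passed through)\n",
	       (unsigned long long)s);
	return 0;
}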
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s.c | 27 +++++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 58f5200fc09b..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -94,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	/* Guest MSR values */
+	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+				 MSR_BE | MSR_DE;
+	/* Process MSR values */
+	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+				 MSR_EE;
+	/* External providers the guest reserved */
+	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	/* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.msr;
@@ -101,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+
 	msr &= to_book3s(vcpu)->msr_mask;
 	vcpu->arch.msr = msr;
-	vcpu->arch.shadow_msr = msr | MSR_USER32;
-	vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE |
-				  MSR_DE | MSR_FE1);
-	vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext);
+	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & (MSR_WE|MSR_POW)) {
 		if (!vcpu->arch.pending_exceptions) {
@@ -610,7 +625,7 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 
 	vcpu->arch.guest_owned_ext &= ~msr;
 	current->thread.regs->msr &= ~msr;
-	kvmppc_set_msr(vcpu, vcpu->arch.msr);
+	kvmppc_recalc_shadow_msr(vcpu);
 }
 
 /* Handle external providers (FPU, Altivec, VSX) */
@@ -664,7 +679,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 
 	vcpu->arch.guest_owned_ext |= msr;
 
-	kvmppc_set_msr(vcpu, vcpu->arch.msr);
+	kvmppc_recalc_shadow_msr(vcpu);
 
 	return RESUME_GUEST;
 }