author      Avi Kivity <avi@redhat.com>    2009-12-02 05:28:47 -0500
committer   Avi Kivity <avi@redhat.com>    2009-12-03 02:34:20 -0500
commit      d5696725b2a4c59503f5e0bc33adeee7f30cd45b (patch)
tree        56069b502c198fbfef0eb0d9bd3a2329b3d37cef /arch/x86/kvm/vmx.c
parent      f50146bd7bdb75435638e60d4960edd9bcdf88b8 (diff)
KVM: VMX: Fix comparison of guest efer with stale host value
update_transition_efer() masks out some EFER bits when deciding whether
to switch the MSR during guest entry; for example, NX is emulated using the
MMU, so we don't need to disable it, and LMA/LME are handled by the hardware.
However, with shared MSRs the comparison is made against a stale value:
at the time of the guest switch we may be running with another guest's EFER.
Fix by deferring the mask/compare to the actual point of guest entry.
Noted by Marcelo.
Signed-off-by: Avi Kivity <avi@redhat.com>
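
The consumer of the new mask argument, kvm_set_shared_msr() in arch/x86/kvm/x86.c, falls outside this diffstat-limited view. As a rough, self-contained sketch of the intended behaviour (the struct and helper names below are illustrative stand-ins, not the kernel's own symbols), the masked comparison now happens against the value actually loaded on the CPU at the moment of guest entry, rather than against a host_efer snapshot taken when the MSR list was populated:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative stand-ins for the per-cpu shared-MSR bookkeeping and the
 * wrmsrl() primitive used by arch/x86/kvm/x86.c; these names are
 * hypothetical, chosen only to keep the sketch self-contained.
 */
struct shared_msr_state {
        uint32_t msr;   /* MSR index, e.g. MSR_EFER */
        uint64_t curr;  /* value currently loaded on this cpu */
};

static void write_msr(uint32_t msr, uint64_t value)
{
        (void)msr;      /* stands in for wrmsrl() */
        (void)value;
}

/*
 * Deferred mask/compare at the point of guest entry: only the bits set
 * in 'mask' take part in the comparison, and the comparison is against
 * the value loaded on this cpu right now - which may have been left
 * there by another guest - not against a host value captured earlier.
 */
static bool set_shared_msr(struct shared_msr_state *s,
                           uint64_t value, uint64_t mask)
{
        if (((value ^ s->curr) & mask) == 0)
                return false;   /* no relevant bits differ, skip the wrmsr */
        s->curr = value;
        write_msr(s->msr, value);
        return true;            /* switched; restore on return to userspace */
}

Passing ~ignore_bits as the mask preserves the original optimisation: differences confined to NX (and, in the common case, SCE) still avoid an MSR switch, while any mismatch in bits that do matter is caught even when the currently loaded value belongs to a different guest.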
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ed97c6c7e648..d4918d6fc924 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -89,6 +89,7 @@ struct vmcs {
 struct shared_msr_entry {
        unsigned index;
        u64 data;
+       u64 mask;
 };
 
 struct vcpu_vmx {
@@ -601,12 +602,10 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        if (guest_efer & EFER_LMA)
                ignore_bits &= ~(u64)EFER_SCE;
 #endif
-       if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-               return false;
-
        guest_efer &= ~ignore_bits;
        guest_efer |= host_efer & ignore_bits;
        vmx->guest_msrs[efer_offset].data = guest_efer;
+       vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
        return true;
 }
 
@@ -657,7 +656,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
        for (i = 0; i < vmx->save_nmsrs; ++i)
                kvm_set_shared_msr(vmx->guest_msrs[i].index,
-                                  vmx->guest_msrs[i].data);
+                                  vmx->guest_msrs[i].data,
+                                  vmx->guest_msrs[i].mask);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -2394,6 +2394,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                data = data_low | ((u64)data_high << 32);
                vmx->guest_msrs[j].index = i;
                vmx->guest_msrs[j].data = 0;
+               vmx->guest_msrs[j].mask = -1ull;
                ++vmx->nmsrs;
        }
 