author    Avi Kivity <avi@redhat.com>    2009-12-02 05:28:47 -0500
committer Avi Kivity <avi@redhat.com>    2009-12-03 02:34:20 -0500
commit    d5696725b2a4c59503f5e0bc33adeee7f30cd45b (patch)
tree      56069b502c198fbfef0eb0d9bd3a2329b3d37cef
parent    f50146bd7bdb75435638e60d4960edd9bcdf88b8 (diff)
KVM: VMX: Fix comparison of guest efer with stale host value
update_transition_efer() masks out some efer bits when deciding whether
to switch the msr during guest entry; for example, NX is emulated using
the mmu so we don't need to disable it, and LMA/LME are handled by the
hardware.

However, with shared msrs, the comparison is made against a stale value;
at the time of the guest switch we may be running with another guest's
efer.

Fix by deferring the mask/compare to the actual point of guest entry.

Noted by Marcelo.

Signed-off-by: Avi Kivity <avi@redhat.com>
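For illustration, the fix splits the decision into two halves: update_transition_efer() now records, per shared-MSR slot, both the value to load and a mask of the bits that must actually match (~ignore_bits), and kvm_set_shared_msr() later performs the compare against the value that is really loaded on the cpu. Below is a minimal user-space sketch of that deferred masked compare; the struct, helper names, and the fake wrmsr are simplified stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ull << 0)    /* syscall enable                    */
#define EFER_NX  (1ull << 11)   /* no-execute, emulated via the mmu  */

/* Simplified stand-in for a shared-MSR slot (cf. struct shared_msr_entry). */
struct msr_slot {
        uint64_t current_value;  /* what is currently programmed on this cpu */
};

/* Stand-in for wrmsrl(): just record the write. */
static void fake_wrmsrl(struct msr_slot *slot, uint64_t value)
{
        slot->current_value = value;
        printf("wrmsr <- 0x%llx\n", (unsigned long long)value);
}

/* Mirrors the reworked kvm_set_shared_msr(): skip the expensive wrmsr when
 * the bits selected by 'mask' already match what the cpu holds; bits outside
 * the mask (e.g. NX for EFER) are allowed to differ. */
static void set_shared_msr(struct msr_slot *slot, uint64_t value, uint64_t mask)
{
        if (((value ^ slot->current_value) & mask) == 0)
                return;
        fake_wrmsrl(slot, value);
}

int main(void)
{
        struct msr_slot efer = { .current_value = EFER_SCE | EFER_NX };

        /* Differs only in NX, which the mask ignores: no wrmsr. */
        set_shared_msr(&efer, EFER_SCE, ~EFER_NX);

        /* Also differs in SCE, which must match: wrmsr happens. */
        set_shared_msr(&efer, 0, ~EFER_NX);
        return 0;
}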
-rw-r--r--  arch/x86/include/asm/kvm_host.h   2
-rw-r--r--  arch/x86/kvm/vmx.c                9
-rw-r--r--  arch/x86/kvm/x86.c                4
3 files changed, 8 insertions, 7 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06e085614dad..4f865e8b8540 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -812,6 +812,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val);
+void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ed97c6c7e648..d4918d6fc924 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -89,6 +89,7 @@ struct vmcs {
 struct shared_msr_entry {
         unsigned index;
         u64 data;
+        u64 mask;
 };
 
 struct vcpu_vmx {
@@ -601,12 +602,10 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
         if (guest_efer & EFER_LMA)
                 ignore_bits &= ~(u64)EFER_SCE;
 #endif
-        if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-                return false;
-
         guest_efer &= ~ignore_bits;
         guest_efer |= host_efer & ignore_bits;
         vmx->guest_msrs[efer_offset].data = guest_efer;
+        vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
         return true;
 }
 
@@ -657,7 +656,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
         for (i = 0; i < vmx->save_nmsrs; ++i)
                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
-                                   vmx->guest_msrs[i].data);
+                                   vmx->guest_msrs[i].data,
+                                   vmx->guest_msrs[i].mask);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -2394,6 +2394,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                 data = data_low | ((u64)data_high << 32);
                 vmx->guest_msrs[j].index = i;
                 vmx->guest_msrs[j].data = 0;
+                vmx->guest_msrs[j].mask = -1ull;
                 ++vmx->nmsrs;
         }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 106f9f1f78c0..ce677b20bf86 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -185,11 +185,11 @@ static void kvm_shared_msr_cpu_online(void)
                 locals->current_value[i] = shared_msrs_global.msrs[i].value;
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value)
+void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 
-        if (value == smsr->current_value[slot])
+        if (((value ^ smsr->current_value[slot]) & mask) == 0)
                 return;
         smsr->current_value[slot] = value;
         wrmsrl(shared_msrs_global.msrs[slot].msr, value);
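As a worked example of the new comparison (illustrative values only): for EFER, ignore_bits includes at least NX (bit 11, 0x800), since the commit message notes NX is emulated via the mmu, so suppose the stored mask is ~0x800. If the cpu currently holds 0xd01 (SCE|LME|LMA|NX) and the guest wants 0x501 (SCE|LME|LMA), then (0x501 ^ 0xd01) & ~0x800 == 0 and the wrmsr is skipped; if the guest also clears SCE (0x500), the masked difference is 0x1 and the wrmsr is performed. With mask = -1ull, the default set in vmx_vcpu_setup(), the check degenerates to the old full equality test.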