author		Roedel, Joerg <Joerg.Roedel@amd.com>	2010-05-06 05:38:43 -0400
committer	Avi Kivity <avi@redhat.com>	2010-05-19 04:36:39 -0400
commit		b69e8caef5b190af48c525f6d715e7b7728a77f6 (patch)
tree		102d0728a1c8ae0b698e6f746cd40a67dfc2db94 /arch/x86
parent		0d945bd9351199744c1e89d57a70615b6ee9f394 (diff)
KVM: x86: Inject #GP with the right rip on efer writes
This patch fixes a bug in the KVM EFER MSR write path. If a guest writes to a reserved EFER bit, the set_efer function injects the #GP directly. The architecture-dependent wrmsr function does not see this, assumes success, and advances the rip. This results in a #GP in the guest with the wrong rip. This patch fixes this by reporting EFER write errors back to the architectural wrmsr function.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
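For context, here is a minimal sketch of the vendor-side WRMSR exit handler that this return code feeds into. It is illustrative only, not the patched kernel source: the names wrmsr_interception() and skip_emulated_instruction() follow the SVM code of this era, but the signature and body are simplified assumptions.

/*
 * Illustrative sketch (not actual kernel code): with set_efer()
 * returning an error instead of injecting #GP itself, the caller
 * decides whether to advance rip, so a failed write faults with
 * rip still pointing at the WRMSR instruction.
 */
static int wrmsr_interception(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (kvm_set_msr_common(vcpu, msr, data))
		kvm_inject_gp(vcpu, 0);		/* failure: rip unchanged */
	else
		skip_emulated_instruction(vcpu);	/* success: advance rip */
	return 1;	/* exit handled, resume the guest */
}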
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/x86.c	31
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 161ede2b5f91..fe6d126633d8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -683,37 +683,29 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MISC_ENABLE,
 };
 
-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (efer & efer_reserved_bits) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if (efer & efer_reserved_bits)
+		return 1;
 
 	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+		return 1;
 
 	if (efer & EFER_FFXSR) {
 		struct kvm_cpuid_entry2 *feat;
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-			kvm_inject_gp(vcpu, 0);
-			return;
-		}
+		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+			return 1;
 	}
 
 	if (efer & EFER_SVME) {
 		struct kvm_cpuid_entry2 *feat;
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-			kvm_inject_gp(vcpu, 0);
-			return;
-		}
+		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+			return 1;
 	}
 
 	kvm_x86_ops->set_efer(vcpu, efer);
@@ -725,6 +717,8 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
 	kvm_mmu_reset_context(vcpu);
+
+	return 0;
 }
 
 void kvm_enable_efer_bits(u64 mask)
@@ -1153,8 +1147,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
 	case MSR_EFER:
-		set_efer(vcpu, data);
-		break;
+		return set_efer(vcpu, data);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */