author     Julian Stecklina <jsteckli@os.inf.tu-dresden.de>   2012-12-05 09:26:19 -0500
committer  Gleb Natapov <gleb@redhat.com>                     2012-12-05 11:00:07 -0500
commit     66f7b72e117180d0007e7a65b8dc5bd1c8126e3b
tree       215798d481c12903a48136631ab6978be440ed99
parent     2b3c5cbc0d814437fe4d70cc11ed60550b95b29f
KVM: x86: Make register state after reset conform to specification
VMX now behaves like SVM with respect to FPU initialization: the code has been
moved to the generic code path. General-purpose registers are now cleared on
RESET and INIT, and the SVM code properly initializes EDX.

Signed-off-by: Julian Stecklina <jsteckli@os.inf.tu-dresden.de>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
-rw-r--r--  arch/x86/kvm/cpuid.c |  1
-rw-r--r--  arch/x86/kvm/svm.c   | 14
-rw-r--r--  arch/x86/kvm/vmx.c   |  8
-rw-r--r--  arch/x86/kvm/x86.c   | 10

4 files changed, 17 insertions(+), 16 deletions(-)
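For illustration only, not part of the patch: the architectural convention this change implements is that, after RESET or INIT, EDX holds the processor signature that CPUID leaf 1 returns in EAX, while the other general-purpose registers are cleared. The stand-alone user-space C sketch below prints that value on the host; it uses GCC's __get_cpuid() helper purely as an assumption for demonstration, whereas the patched svm_vcpu_reset() derives the same value inside the guest context via kvm_cpuid() and kvm_register_write().

/*
 * Stand-alone sketch, not kernel code: print the value that EDX is
 * specified to contain after RESET/INIT, i.e. the family/model/stepping
 * signature returned in EAX by CPUID leaf 1.
 */
#include <stdio.h>
#include <cpuid.h>	/* __get_cpuid(), provided by GCC/clang */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 1 not available */

	printf("reset value of EDX (processor signature): 0x%08x\n", eax);
	return 0;
}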
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 52f6166ef92c..a20ecb5b6cbf 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -661,6 +661,7 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 	} else
 		*eax = *ebx = *ecx = *edx = 0;
 }
+EXPORT_SYMBOL_GPL(kvm_cpuid);
 
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index dcb79527e7aa..d29d3cd1c156 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -20,6 +20,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
@@ -1193,6 +1194,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 dummy;
+	u32 eax = 1;
 
 	init_vmcb(svm);
 
@@ -1201,8 +1204,9 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
-	vcpu->arch.regs_avail = ~0;
-	vcpu->arch.regs_dirty = ~0;
+
+	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
 
 	return 0;
 }
@@ -1259,10 +1263,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
-	err = fx_init(&svm->vcpu);
-	if (err)
-		goto free_page4;
-
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
@@ -1271,8 +1271,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	return &svm->vcpu;
 
-free_page4:
-	__free_page(hsave_page);
 free_page3:
 	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page2:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32485d8d05d4..94833e2fe78c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3918,8 +3918,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-
 	vmx->rmode.vm86_active = 0;
 
 	vmx->soft_vnmi_blocked = 0;
@@ -3931,10 +3929,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		msr |= MSR_IA32_APICBASE_BSP;
 	kvm_set_apic_base(&vmx->vcpu, msr);
 
-	ret = fx_init(&vmx->vcpu);
-	if (ret != 0)
-		goto out;
-
 	vmx_segment_cache_clear(vmx);
 
 	seg_setup(VCPU_SREG_CS);
@@ -3975,7 +3969,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		kvm_rip_write(vcpu, 0xfff0);
 	else
 		kvm_rip_write(vcpu, 0);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
@@ -4025,7 +4018,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	/* HACK: Don't enable emulation on guest boot/reset */
 	vmx->emulation_required = 0;
 
-out:
 	return ret;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3bdaf298b8c7..57c76e86e9bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6461,6 +6461,10 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	kvm_pmu_reset(vcpu);
 
+	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -6629,11 +6633,17 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	r = fx_init(vcpu);
+	if (r)
+		goto fail_free_wbinvd_dirty_mask;
+
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 
 	return 0;
+fail_free_wbinvd_dirty_mask:
+	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
 fail_free_lapic: