author    Gleb Natapov <gleb@redhat.com>    2011-02-21 05:07:59 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>    2011-03-17 12:08:31 -0400
commit    93ea5388ead5d7b87f54b8de53e35231acec8bbe (patch)
tree      7ec192ad68fc3a28021ccfb229f5a49bd9980510
parent    a8ba6c2622036101d0c6a195f97546bcb1a056ab (diff)
KVM: VMX: Initialize vm86 TSS only once.
Currently the vm86 task is initialized on each real-mode entry and on
vcpu reset. Initialization is done by zeroing the TSS and updating the
relevant fields. But since all vcpus use the same TSS, there is a race:
one vcpu may be using the TSS while another vcpu is initializing it, so
the vcpu using the TSS will see wrong TSS content and behave
incorrectly. Fix that by initializing the TSS only once.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/kvm/vmx.c  28
1 file changed, 6 insertions, 22 deletions
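The race the patch closes, in short: all vcpus share the single vm86 TSS at kvm->arch.tss_addr, and before this change every real-mode entry and vcpu reset re-ran init_rmode(), zeroing that shared TSS while another vcpu might already be running on it. The minimal userspace sketch below is an analogy only, not KVM code; the fake_tss struct, the thread function and the pthread_once() call are illustrative assumptions standing in for "initialize shared state once at setup time instead of on every entry".

/*
 * Userspace analogy of the init-once pattern (illustrative only, not KVM code).
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct fake_tss {                       /* stand-in for the shared vm86 TSS */
	unsigned short iomap_base;
	unsigned char  intr_redirect[32];
};

static struct fake_tss shared_tss;      /* one instance shared by all "vcpus" */
static pthread_once_t tss_once = PTHREAD_ONCE_INIT;

static void init_tss_once(void)
{
	/* Zero and set up the shared TSS exactly once, before anyone relies on it. */
	memset(&shared_tss, 0, sizeof(shared_tss));
	shared_tss.iomap_base = sizeof(shared_tss);
}

static void *vcpu_thread(void *arg)
{
	(void)arg;
	/*
	 * Pre-patch shape of the bug: doing the memset() here, on every
	 * "real-mode entry", would wipe the TSS under a thread already using it.
	 * Post-patch shape: all users funnel through a single one-time init.
	 */
	pthread_once(&tss_once, init_tss_once);

	/* ... use shared_tss ... */
	return NULL;
}

int main(void)
{
	pthread_t vcpus[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&vcpus[i], NULL, vcpu_thread, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(vcpus[i], NULL);

	printf("iomap_base = %u\n", shared_tss.iomap_base);
	return 0;
}

In the patch itself no pthread_once()-style guard is needed: the one-time behaviour comes from moving init_rmode_tss() into vmx_set_tss_addr() and init_rmode_identity_map() into vmx_create_vcpu(), i.e. into setup paths rather than the per-entry real-mode path.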
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index dafb67eddd60..e2b8c6b21ff2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -176,7 +176,6 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
-static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
@@ -1802,7 +1801,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
 continue_rmode:
 	kvm_mmu_reset_context(vcpu);
-	init_rmode(vcpu->kvm);
 }
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -2737,22 +2735,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	return 0;
 }
 
-static int init_rmode(struct kvm *kvm)
-{
-	int idx, ret = 0;
-
-	idx = srcu_read_lock(&kvm->srcu);
-	if (!init_rmode_tss(kvm))
-		goto exit;
-	if (!init_rmode_identity_map(kvm))
-		goto exit;
-
-	ret = 1;
-exit:
-	srcu_read_unlock(&kvm->srcu, idx);
-	return ret;
-}
-
 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2760,10 +2742,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	int ret;
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-	if (!init_rmode(vmx->vcpu.kvm)) {
-		ret = -ENOMEM;
-		goto out;
-	}
 
 	vmx->rmode.vm86_active = 0;
 
@@ -3009,6 +2987,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
+	if (!init_rmode_tss(kvm))
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -4224,8 +4205,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		if (!kvm->arch.ept_identity_map_addr)
 			kvm->arch.ept_identity_map_addr =
 				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+		err = -ENOMEM;
 		if (alloc_identity_pagetable(kvm) != 0)
 			goto free_vmcs;
+		if (!init_rmode_identity_map(kvm))
+			goto free_vmcs;
 	}
 
 	return &vmx->vcpu;