aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorNadav Har'El <nyh@il.ibm.com>2011-05-25 16:03:24 -0400
committerAvi Kivity <avi@redhat.com>2011-07-12 04:45:10 -0400
commit5e1746d6205d1efa3193cc0c67aa2d15e54799bd (patch)
tree9234ab8fddb5e7a8d86b506a19d7697566d51cfb /arch/x86
parentec378aeef9dfc7c4ba72e9bd6cd4bd6f7d5fd0cc (diff)
KVM: nVMX: Allow setting the VMXE bit in CR4
This patch allows the guest to enable the VMXE bit in CR4, which is a prerequisite to running VMXON. Whether to allow setting the VMXE bit now depends on the architecture (svm or vmx), so its checking has moved to kvm_x86_ops->set_cr4(). This function now returns an int: If kvm_x86_ops->set_cr4() returns 1, __kvm_set_cr4() will also return 1, and this will cause kvm_set_cr4() to throw a #GP. Turning on the VMXE bit is allowed only when the nested VMX feature is enabled, and turning it off is forbidden after a vmxon. Signed-off-by: Nadav Har'El <nyh@il.ibm.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/vmx.c17
-rw-r--r--arch/x86/kvm/x86.c4
4 files changed, 22 insertions, 7 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ff17deb6e98b..d167039ecdf4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -555,7 +555,7 @@ struct kvm_x86_ops {
555 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); 555 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
556 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 556 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
557 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 557 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
558 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); 558 int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
559 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); 559 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
560 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); 560 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
561 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); 561 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 506e4fe23adc..475d1c948501 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1496,11 +1496,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1496 update_cr0_intercept(svm); 1496 update_cr0_intercept(svm);
1497} 1497}
1498 1498
1499static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1499static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1500{ 1500{
1501 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; 1501 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
1502 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; 1502 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1503 1503
1504 if (cr4 & X86_CR4_VMXE)
1505 return 1;
1506
1504 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) 1507 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1505 svm_flush_tlb(vcpu); 1508 svm_flush_tlb(vcpu);
1506 1509
@@ -1510,6 +1513,7 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1510 cr4 |= host_cr4_mce; 1513 cr4 |= host_cr4_mce;
1511 to_svm(vcpu)->vmcb->save.cr4 = cr4; 1514 to_svm(vcpu)->vmcb->save.cr4 = cr4;
1512 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); 1515 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1516 return 0;
1513} 1517}
1514 1518
1515static void svm_set_segment(struct kvm_vcpu *vcpu, 1519static void svm_set_segment(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3a727ca02f24..eda2cb619c25 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2121,7 +2121,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
2121 (unsigned long *)&vcpu->arch.regs_dirty); 2121 (unsigned long *)&vcpu->arch.regs_dirty);
2122} 2122}
2123 2123
2124static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); 2124static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
2125 2125
2126static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, 2126static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
2127 unsigned long cr0, 2127 unsigned long cr0,
@@ -2219,11 +2219,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
2219 vmcs_writel(GUEST_CR3, guest_cr3); 2219 vmcs_writel(GUEST_CR3, guest_cr3);
2220} 2220}
2221 2221
2222static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 2222static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2223{ 2223{
2224 unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? 2224 unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
2225 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); 2225 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
2226 2226
2227 if (cr4 & X86_CR4_VMXE) {
2228 /*
2229 * To use VMXON (and later other VMX instructions), a guest
2230 * must first be able to turn on cr4.VMXE (see handle_vmon()).
2231 * So basically the check on whether to allow nested VMX
2232 * is here.
2233 */
2234 if (!nested_vmx_allowed(vcpu))
2235 return 1;
2236 } else if (to_vmx(vcpu)->nested.vmxon)
2237 return 1;
2238
2227 vcpu->arch.cr4 = cr4; 2239 vcpu->arch.cr4 = cr4;
2228 if (enable_ept) { 2240 if (enable_ept) {
2229 if (!is_paging(vcpu)) { 2241 if (!is_paging(vcpu)) {
@@ -2236,6 +2248,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2236 2248
2237 vmcs_writel(CR4_READ_SHADOW, cr4); 2249 vmcs_writel(CR4_READ_SHADOW, cr4);
2238 vmcs_writel(GUEST_CR4, hw_cr4); 2250 vmcs_writel(GUEST_CR4, hw_cr4);
2251 return 0;
2239} 2252}
2240 2253
2241static void vmx_get_segment(struct kvm_vcpu *vcpu, 2254static void vmx_get_segment(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d88de565d0c0..460932b62c5b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -615,11 +615,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
615 kvm_read_cr3(vcpu))) 615 kvm_read_cr3(vcpu)))
616 return 1; 616 return 1;
617 617
618 if (cr4 & X86_CR4_VMXE) 618 if (kvm_x86_ops->set_cr4(vcpu, cr4))
619 return 1; 619 return 1;
620 620
621 kvm_x86_ops->set_cr4(vcpu, cr4);
622
623 if ((cr4 ^ old_cr4) & pdptr_bits) 621 if ((cr4 ^ old_cr4) & pdptr_bits)
624 kvm_mmu_reset_context(vcpu); 622 kvm_mmu_reset_context(vcpu);
625 623