author    Andy Lutomirski <luto@amacapital.net>	2014-10-08 12:02:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 13:09:03 -0400
commit    d974baa398f34393db76be45f7d4d04fbdbb4a0a (patch)
tree      781bdd81421b75e92e98aafbeb2d7ad5845d0fc5 /arch/x86
parent    2e923b0251932ad4a82cc87ec1443a1f1d17073e (diff)
x86,kvm,vmx: Preserve CR4 across VM entry
CR4 isn't constant; at least the TSD and PCE bits can vary.

TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.

This adds a branch and a read from cr4 to each vm entry.  Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact.  A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Cc: Petr Matousek <pmatouse@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
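For illustration, below is a minimal user-space sketch of that "fix up the
vmcs" pattern: cache the last value written for HOST_CR4 and only issue the
write on a mismatch.  The compare-before-write logic and the vmcs_host_cr4
field mirror the patch that follows, but read_cr4() and vmcs_writel() are
stubbed here, and fake_cr4, vmwrite_count, sync_host_cr4() and main() are
hypothetical scaffolding (not kernel code) so the sketch compiles and runs
standalone.

#include <stdio.h>

#define HOST_CR4 0x6c04UL	/* VMCS field encoding, Intel SDM vol. 3 */

static unsigned long fake_cr4 = 0x407b0UL;	/* stand-in for the real %cr4 */
static unsigned long vmwrite_count;

static unsigned long read_cr4(void)
{
	return fake_cr4;	/* the kernel reads the real register here */
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	(void)field;
	(void)value;
	vmwrite_count++;	/* the kernel executes VMWRITE here */
}

struct vcpu_vmx {
	struct {
		unsigned long vmcs_host_cr4;	/* may not match real cr4 */
	} host_state;
};

/* Fast path run on every VM entry: skip the VMWRITE when cr4 is unchanged. */
static void sync_host_cr4(struct vcpu_vmx *vmx)
{
	unsigned long cr4 = read_cr4();

	if (cr4 != vmx->host_state.vmcs_host_cr4) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->host_state.vmcs_host_cr4 = cr4;
	}
}

int main(void)
{
	struct vcpu_vmx vmx = { .host_state = { .vmcs_host_cr4 = 0 } };

	sync_host_cr4(&vmx);	/* first entry: VMCS written */
	sync_host_cr4(&vmx);	/* same cr4: write elided */
	fake_cr4 |= 1UL << 2;	/* CR4.TSD flipped, e.g. via prctl(PR_SET_TSC) */
	sync_host_cr4(&vmx);	/* mismatch: VMCS written again */

	printf("VMWRITEs issued: %lu\n", vmwrite_count);	/* prints 2 */
	return 0;
}

The design choice the sketch captures: since host CR4 almost never changes
between consecutive entries to the same vcpu, one cached copy plus a branch
replaces an unconditional VMWRITE on every entry.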
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/vmx.c	| 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d9dcfa27aa84..0acac81f198b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -472,6 +472,7 @@ struct vcpu_vmx {
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
 		u64           msr_host_bndcfgs;
+		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
 	} host_state;
 	struct {
 		int vm86_active;
@@ -4267,11 +4268,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
+	unsigned long cr4;
 
 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
+	/* Save the most likely value for this task's CR4 in the VMCS. */
+	cr4 = read_cr4();
+	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
+	vmx->host_state.vmcs_host_cr4 = cr4;
+
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	/*
@@ -7514,7 +7520,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long debugctlmsr;
+	unsigned long debugctlmsr, cr4;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
@@ -7540,6 +7546,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr4 = read_cr4();
+	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+		vmcs_writel(HOST_CR4, cr4);
+		vmx->host_state.vmcs_host_cr4 = cr4;
+	}
+
 	/* When single-stepping over STI and MOV SS, we must clear the
 	 * corresponding interruptibility bits in the guest state. Otherwise
 	 * vmentry fails as it then expects bit 14 (BS) in pending debug