author    Avi Kivity <avi@redhat.com>    2010-12-05 11:56:11 -0500
committer Avi Kivity <avi@redhat.com>    2011-01-12 04:31:16 -0500
commit    aff48baa34c033318ad322ecbf2e4bcd891b29ca (patch)
tree      84955c55c812dd540cd0c54e5bdf6d3f5bfd08be /arch
parent    9f8fe5043fd26627c2fa2e9a41896885e675000b (diff)
KVM: Fetch guest cr3 from hardware on demand
Instead of syncing the guest cr3 on every exit, which is expensive on vmx with ept enabled, sync it only on demand.

[sheng: fix incorrect cr3 seen by Windows XP]

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
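As a rough illustration only (hypothetical names, plain userspace C, not code from this patch or from KVM), the sketch below shows the on-demand caching idea the commit applies to CR3: clear a per-register "available" bit on every vmexit, and perform the expensive hardware read only when the cached value is actually requested.

/*
 * Minimal sketch of a lazy register cache: the expensive read happens
 * only on first use after the cache is invalidated.
 */
#include <stdio.h>

#define REG_CR3 0

struct vcpu {
	unsigned long cr3;          /* cached value */
	unsigned long regs_avail;   /* bitmask: which cached values are valid */
};

/* Stand-in for the expensive vmcs_readl(GUEST_CR3)-style hardware read. */
static unsigned long read_cr3_from_hardware(void)
{
	puts("expensive hardware read");
	return 0x1000;
}

static unsigned long vcpu_read_cr3(struct vcpu *v)
{
	if (!(v->regs_avail & (1UL << REG_CR3))) {
		v->cr3 = read_cr3_from_hardware();   /* decache on demand */
		v->regs_avail |= 1UL << REG_CR3;
	}
	return v->cr3;
}

int main(void)
{
	struct vcpu v = { .cr3 = 0, .regs_avail = 0 };

	vcpu_read_cr3(&v);  /* first read hits "hardware" */
	vcpu_read_cr3(&v);  /* second read is served from the cache */

	/* After the next vmexit the cache would be invalidated again: */
	v.regs_avail &= ~(1UL << REG_CR3);
	return 0;
}

In the actual patch the valid bit lives in vcpu->arch.regs_avail (VCPU_EXREG_CR3), vmx_vcpu_run() clears it after every exit, and kvm_read_cr3() triggers the refill through kvm_x86_ops->decache_cr3().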
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2 ++
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h   |  2 ++
-rw-r--r--  arch/x86/kvm/svm.c              |  5 +++++
-rw-r--r--  arch/x86/kvm/vmx.c              | 18 +++++++++++-------
-rw-r--r--  arch/x86/kvm/x86.c              |  2 ++
5 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6268f6ce6434..95f026be8b5e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -117,6 +117,7 @@ enum kvm_reg {
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
 };
 
 enum {
@@ -533,6 +534,7 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index a6bf8db326f5..3377d53fcd36 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -75,6 +75,8 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->decache_cr3(vcpu);
 	return vcpu->arch.cr3;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a7b04c0bd7a5..25bd1bc5aad2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1327,6 +1327,10 @@ static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void svm_decache_cr3(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
@@ -3871,6 +3875,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+	.decache_cr3 = svm_decache_cr3,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 141956ebf794..1896cada805f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -180,6 +180,7 @@ static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -1866,6 +1867,13 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
+static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept && is_paging(vcpu))
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -1909,6 +1917,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
+	vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -3756,11 +3765,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
 
-	/* Access CR3 don't cause VMExit in paging mode, so we need
-	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu))
-		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-
 	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -4077,7 +4081,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	      );
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_PDPTR));
+				  | (1 << VCPU_EXREG_PDPTR)
+				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
@@ -4344,6 +4349,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+	.decache_cr3 = vmx_decache_cr3,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6e50314d64fb..fa708c9a7437 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -667,6 +667,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
 		return 1;
 	vcpu->arch.cr3 = cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
@@ -5583,6 +5584,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	vcpu->arch.cr2 = sregs->cr2;
 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 