about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-12-07 05:16:48 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:35:39 -0500
commitfc78f51938e1ea866daa2045851b2e5681371668 (patch)
treebf46fdc9daca6cab3b42d102ec12c133a62cb2ff /arch
parentcdc0e24456bf5678f63497569c3676c9019f82c1 (diff)
KVM: Add accessor for reading cr4 (or some bits of cr4)
Some bits of cr4 can be owned by the guest on vmx, so when we read them, we copy them to the vcpu structure. In preparation for making the set of guest-owned bits dynamic, use helpers to access these bits so we don't need to know where the bit resides. No changes to svm since all bits are host-owned there. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h12
-rw-r--r--arch/x86/kvm/mmu.h5
-rw-r--r--arch/x86/kvm/vmx.c13
-rw-r--r--arch/x86/kvm/x86.c16
5 files changed, 30 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index da6dee86276..e9f4f12ec3c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -272,6 +272,7 @@ struct kvm_vcpu_arch {
272 unsigned long cr2; 272 unsigned long cr2;
273 unsigned long cr3; 273 unsigned long cr3;
274 unsigned long cr4; 274 unsigned long cr4;
275 unsigned long cr4_guest_owned_bits;
275 unsigned long cr8; 276 unsigned long cr8;
276 u32 hflags; 277 u32 hflags;
277 u64 pdptrs[4]; /* pae */ 278 u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7bcc5b6a440..35acc36e178 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
38 return vcpu->arch.pdptrs[index]; 38 return vcpu->arch.pdptrs[index];
39} 39}
40 40
41static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
42{
43 if (mask & vcpu->arch.cr4_guest_owned_bits)
44 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
45 return vcpu->arch.cr4 & mask;
46}
47
48static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
49{
50 return kvm_read_cr4_bits(vcpu, ~0UL);
51}
52
41#endif 53#endif
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61a1b3884b4..4567d8042b2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
2#define __KVM_X86_MMU_H 2#define __KVM_X86_MMU_H
3 3
4#include <linux/kvm_host.h> 4#include <linux/kvm_host.h>
5#include "kvm_cache_regs.h"
5 6
6#define PT64_PT_BITS 9 7#define PT64_PT_BITS 9
7#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS) 8#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -64,12 +65,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
64 65
65static inline int is_pae(struct kvm_vcpu *vcpu) 66static inline int is_pae(struct kvm_vcpu *vcpu)
66{ 67{
67 return vcpu->arch.cr4 & X86_CR4_PAE; 68 return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
68} 69}
69 70
70static inline int is_pse(struct kvm_vcpu *vcpu) 71static inline int is_pse(struct kvm_vcpu *vcpu)
71{ 72{
72 return vcpu->arch.cr4 & X86_CR4_PSE; 73 return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
73} 74}
74 75
75static inline int is_paging(struct kvm_vcpu *vcpu) 76static inline int is_paging(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index efbb614ccd3..284e905c59d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1615,8 +1615,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1615 1615
1616static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 1616static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1617{ 1617{
1618 vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; 1618 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
1619 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; 1619
1620 vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
1621 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
1620} 1622}
1621 1623
1622static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 1624static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1661,7 +1663,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1661 (CPU_BASED_CR3_LOAD_EXITING | 1663 (CPU_BASED_CR3_LOAD_EXITING |
1662 CPU_BASED_CR3_STORE_EXITING)); 1664 CPU_BASED_CR3_STORE_EXITING));
1663 vcpu->arch.cr0 = cr0; 1665 vcpu->arch.cr0 = cr0;
1664 vmx_set_cr4(vcpu, vcpu->arch.cr4); 1666 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1665 } else if (!is_paging(vcpu)) { 1667 } else if (!is_paging(vcpu)) {
1666 /* From nonpaging to paging */ 1668 /* From nonpaging to paging */
1667 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1669 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1669,7 +1671,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1669 ~(CPU_BASED_CR3_LOAD_EXITING | 1671 ~(CPU_BASED_CR3_LOAD_EXITING |
1670 CPU_BASED_CR3_STORE_EXITING)); 1672 CPU_BASED_CR3_STORE_EXITING));
1671 vcpu->arch.cr0 = cr0; 1673 vcpu->arch.cr0 = cr0;
1672 vmx_set_cr4(vcpu, vcpu->arch.cr4); 1674 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1673 } 1675 }
1674 1676
1675 if (!(cr0 & X86_CR0_WP)) 1677 if (!(cr0 & X86_CR0_WP))
@@ -2420,6 +2422,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2420 2422
2421 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); 2423 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2422 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); 2424 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2425 vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;
2423 2426
2424 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; 2427 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2425 rdtscll(tsc_this); 2428 rdtscll(tsc_this);
@@ -3050,7 +3053,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
3050 vcpu->arch.eff_db[dr] = val; 3053 vcpu->arch.eff_db[dr] = val;
3051 break; 3054 break;
3052 case 4 ... 5: 3055 case 4 ... 5:
3053 if (vcpu->arch.cr4 & X86_CR4_DE) 3056 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
3054 kvm_queue_exception(vcpu, UD_VECTOR); 3057 kvm_queue_exception(vcpu, UD_VECTOR);
3055 break; 3058 break;
3056 case 6: 3059 case 6:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 27931867791..84dd33e717f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
482 482
483void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 483void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
484{ 484{
485 unsigned long old_cr4 = vcpu->arch.cr4; 485 unsigned long old_cr4 = kvm_read_cr4(vcpu);
486 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; 486 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
487 487
488 if (cr4 & CR4_RESERVED_BITS) { 488 if (cr4 & CR4_RESERVED_BITS) {
@@ -1899,7 +1899,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1899 return 0; 1899 return 0;
1900 if (mce->status & MCI_STATUS_UC) { 1900 if (mce->status & MCI_STATUS_UC) {
1901 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 1901 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1902 !(vcpu->arch.cr4 & X86_CR4_MCE)) { 1902 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
1903 printk(KERN_DEBUG "kvm: set_mce: " 1903 printk(KERN_DEBUG "kvm: set_mce: "
1904 "injects mce exception while " 1904 "injects mce exception while "
1905 "previous one is in progress!\n"); 1905 "previous one is in progress!\n");
@@ -3616,7 +3616,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3616{ 3616{
3617 unsigned long value; 3617 unsigned long value;
3618 3618
3619 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3620 switch (cr) { 3619 switch (cr) {
3621 case 0: 3620 case 0:
3622 value = vcpu->arch.cr0; 3621 value = vcpu->arch.cr0;
@@ -3628,7 +3627,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3628 value = vcpu->arch.cr3; 3627 value = vcpu->arch.cr3;
3629 break; 3628 break;
3630 case 4: 3629 case 4:
3631 value = vcpu->arch.cr4; 3630 value = kvm_read_cr4(vcpu);
3632 break; 3631 break;
3633 case 8: 3632 case 8:
3634 value = kvm_get_cr8(vcpu); 3633 value = kvm_get_cr8(vcpu);
@@ -3656,7 +3655,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3656 kvm_set_cr3(vcpu, val); 3655 kvm_set_cr3(vcpu, val);
3657 break; 3656 break;
3658 case 4: 3657 case 4:
3659 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val)); 3658 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3660 break; 3659 break;
3661 case 8: 3660 case 8:
3662 kvm_set_cr8(vcpu, val & 0xfUL); 3661 kvm_set_cr8(vcpu, val & 0xfUL);
@@ -4237,11 +4236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4237 sregs->gdt.limit = dt.limit; 4236 sregs->gdt.limit = dt.limit;
4238 sregs->gdt.base = dt.base; 4237 sregs->gdt.base = dt.base;
4239 4238
4240 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
4241 sregs->cr0 = vcpu->arch.cr0; 4239 sregs->cr0 = vcpu->arch.cr0;
4242 sregs->cr2 = vcpu->arch.cr2; 4240 sregs->cr2 = vcpu->arch.cr2;
4243 sregs->cr3 = vcpu->arch.cr3; 4241 sregs->cr3 = vcpu->arch.cr3;
4244 sregs->cr4 = vcpu->arch.cr4; 4242 sregs->cr4 = kvm_read_cr4(vcpu);
4245 sregs->cr8 = kvm_get_cr8(vcpu); 4243 sregs->cr8 = kvm_get_cr8(vcpu);
4246 sregs->efer = vcpu->arch.shadow_efer; 4244 sregs->efer = vcpu->arch.shadow_efer;
4247 sregs->apic_base = kvm_get_apic_base(vcpu); 4245 sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -4737,13 +4735,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4737 kvm_x86_ops->set_efer(vcpu, sregs->efer); 4735 kvm_x86_ops->set_efer(vcpu, sregs->efer);
4738 kvm_set_apic_base(vcpu, sregs->apic_base); 4736 kvm_set_apic_base(vcpu, sregs->apic_base);
4739 4737
4740 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
4741
4742 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; 4738 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
4743 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 4739 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
4744 vcpu->arch.cr0 = sregs->cr0; 4740 vcpu->arch.cr0 = sregs->cr0;
4745 4741
4746 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; 4742 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
4747 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 4743 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4748 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 4744 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
4749 load_pdptrs(vcpu, vcpu->arch.cr3); 4745 load_pdptrs(vcpu, vcpu->arch.cr3);