author      Avi Kivity <avi@redhat.com>            2009-12-07 05:16:48 -0500
committer   Marcelo Tosatti <mtosatti@redhat.com>  2010-03-01 10:35:39 -0500
commit      fc78f51938e1ea866daa2045851b2e5681371668 (patch)
tree        bf46fdc9daca6cab3b42d102ec12c133a62cb2ff /arch/x86/kvm/vmx.c
parent      cdc0e24456bf5678f63497569c3676c9019f82c1 (diff)
KVM: Add accessor for reading cr4 (or some bits of cr4)
Some bits of cr4 can be owned by the guest on vmx, so when we read them, we
copy them to the vcpu structure.  In preparation for making the set of
guest-owned bits dynamic, use helpers to access these bits so we don't need
to know where the bit resides.

No changes to svm since all bits are host-owned there.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--   arch/x86/kvm/vmx.c   13
1 file changed, 8 insertions, 5 deletions
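
Note: this view is limited to arch/x86/kvm/vmx.c, so the new accessors the
commit message refers to (kvm_read_cr4() and kvm_read_cr4_bits()) are not
part of the diff shown below; they are introduced alongside this change as
static inline helpers (presumably in arch/x86/kvm/kvm_cache_regs.h). A
minimal sketch of the idea, assuming the per-vendor decache hook is reached
through kvm_x86_ops as it is for the vmx_decache_cr4_guest_bits() callback
in this patch:

/* Sketch only: read cr4 (or some bits of it), decaching guest-owned bits
 * from the VMCS on demand before looking at the cached copy. */
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        /* If any requested bit may be owned by the guest, the cached copy
         * in vcpu->arch.cr4 can be stale; refresh it first. */
        if (mask & vcpu->arch.cr4_guest_owned_bits)
                kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        /* Read the whole register by asking for every bit. */
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

With helpers like these, call sites such as handle_dr() below read only the
bit they care about (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) instead of poking
vcpu->arch.cr4 directly.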
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index efbb614ccd36..284e905c59d3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1615,8 +1615,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
-        vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+        ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+        vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+        vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
 }
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1661,7 +1663,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                             (CPU_BASED_CR3_LOAD_EXITING |
                              CPU_BASED_CR3_STORE_EXITING));
                 vcpu->arch.cr0 = cr0;
-                vmx_set_cr4(vcpu, vcpu->arch.cr4);
+                vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
         } else if (!is_paging(vcpu)) {
                 /* From nonpaging to paging */
                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1669,7 +1671,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                              ~(CPU_BASED_CR3_LOAD_EXITING |
                                CPU_BASED_CR3_STORE_EXITING));
                 vcpu->arch.cr0 = cr0;
-                vmx_set_cr4(vcpu, vcpu->arch.cr4);
+                vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
         }
 
         if (!(cr0 & X86_CR0_WP))
@@ -2420,6 +2422,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+        vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;
 
         tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
         rdtscll(tsc_this);
@@ -3050,7 +3053,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                 vcpu->arch.eff_db[dr] = val;
                 break;
         case 4 ... 5:
-                if (vcpu->arch.cr4 & X86_CR4_DE)
+                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                         kvm_queue_exception(vcpu, UD_VECTOR);
                 break;
         case 6: