author		Avi Kivity <avi@redhat.com>	2009-03-23 12:26:32 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:32 -0400
commit		089d034e0c4538d2436512fa64782b91008d4a7c (patch)
tree		02323649ff18e4d1196887ee346e318f8f0aab8d /arch/x86/kvm/vmx.c
parent		575ff2dcb25608d53737d1126ee0e7e4d6f11752 (diff)
KVM: VMX: Fold vm_need_ept() into callers
Trivial.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	33
1 file changed, 14 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2f65120cf283..da6461d5dc84 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -263,11 +263,6 @@ static inline int cpu_has_vmx_ept(void)
 		SECONDARY_EXEC_ENABLE_EPT);
 }
 
-static inline int vm_need_ept(void)
-{
-	return enable_ept;
-}
-
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
 	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
@@ -382,7 +377,7 @@ static inline void ept_sync_global(void)
 
 static inline void ept_sync_context(u64 eptp)
 {
-	if (vm_need_ept()) {
+	if (enable_ept) {
 		if (cpu_has_vmx_invept_context())
 			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
 		else
@@ -392,7 +387,7 @@ static inline void ept_sync_context(u64 eptp)
 
 static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
 {
-	if (vm_need_ept()) {
+	if (enable_ept) {
 		if (cpu_has_vmx_invept_individual_addr())
 			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
 					eptp, gpa);
@@ -491,7 +486,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	}
 	if (vcpu->arch.rmode.active)
 		eb = ~0;
-	if (vm_need_ept())
+	if (enable_ept)
 		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -1502,7 +1497,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	vpid_sync_vcpu_all(to_vmx(vcpu));
-	if (vm_need_ept())
+	if (enable_ept)
 		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
 }
 
@@ -1587,7 +1582,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	}
 #endif
 
-	if (vm_need_ept())
+	if (enable_ept)
 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
 	vmcs_writel(CR0_READ_SHADOW, cr0);
@@ -1616,7 +1611,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	u64 eptp;
 
 	guest_cr3 = cr3;
-	if (vm_need_ept()) {
+	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
 		ept_sync_context(eptp);
@@ -1637,7 +1632,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
 	vcpu->arch.cr4 = cr4;
-	if (vm_need_ept())
+	if (enable_ept)
 		ept_update_paging_mode_cr4(&hw_cr4, vcpu);
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
@@ -1999,7 +1994,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	pfn_t identity_map_pfn;
 	u32 tmp;
 
-	if (!vm_need_ept())
+	if (!enable_ept)
 		return 1;
 	if (unlikely(!kvm->arch.ept_identity_pagetable)) {
 		printk(KERN_ERR "EPT: identity-mapping pagetable "
@@ -2163,7 +2158,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				CPU_BASED_CR8_LOAD_EXITING;
 #endif
 	}
-	if (!vm_need_ept())
+	if (!enable_ept)
 		exec_control |= CPU_BASED_CR3_STORE_EXITING |
 				CPU_BASED_CR3_LOAD_EXITING  |
 				CPU_BASED_INVLPG_EXITING;
@@ -2176,7 +2171,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 			~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 		if (vmx->vpid == 0)
 			exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
-		if (!vm_need_ept())
+		if (!enable_ept)
 			exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 	}
@@ -2637,7 +2632,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
 	if (is_page_fault(intr_info)) {
 		/* EPT won't cause page fault directly */
-		if (vm_need_ept())
+		if (enable_ept)
 			BUG();
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
@@ -3187,7 +3182,7 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
-	if (vm_need_ept() && is_paging(vcpu)) {
+	if (enable_ept && is_paging(vcpu)) {
 		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 		ept_load_pdptrs(vcpu);
 	}
@@ -3602,7 +3597,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (alloc_apic_access_page(kvm) != 0)
 		goto free_vmcs;
 
-	if (vm_need_ept())
+	if (enable_ept)
 		if (alloc_identity_pagetable(kvm) != 0)
 			goto free_vmcs;
 
@@ -3753,7 +3748,7 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
 
-	if (vm_need_ept()) {
+	if (enable_ept) {
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK);