author     Paolo Bonzini <pbonzini@redhat.com>    2013-10-02 10:56:14 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2015-01-08 16:48:02 -0500
commit     ad896af0b50ed656e38a31fca1fdb7bb7533db45 (patch)
tree       a633dad0d81ed0707fae701975845599beaa3d2b /arch/x86/kvm/mmu.c
parent     e0c6db3e22f564d91832547a2432ab00f215108e (diff)
KVM: x86: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu
The initialization functions in mmu.c can always use walk_mmu, which
is known to point to vcpu->arch.mmu; only init_kvm_nested_mmu
initializes vcpu->arch.nested_mmu. The explicit context argument to
kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu is therefore redundant
and can be dropped.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
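
To see why the argument is removable, it helps to picture the pointer
relationship the message describes. Below is a minimal standalone sketch of
that invariant, using illustrative stub structs rather than the real
definitions from arch/x86/include/asm/kvm_host.h: outside of nested-MMU
setup, walk_mmu always designates &vcpu->arch.mmu, so a callee that was
handed walk_mmu can just compute that address itself.

```c
#include <assert.h>

/* Illustrative stubs only -- not the real kernel definitions. */
struct kvm_mmu {
	unsigned long root_hpa;
};

struct kvm_vcpu_arch {
	struct kvm_mmu mmu;        /* MMU used to translate guest memory */
	struct kvm_mmu nested_mmu; /* set up only by init_kvm_nested_mmu() */
	struct kvm_mmu *walk_mmu;  /* MMU used for guest page-table walks */
};

int main(void)
{
	struct kvm_vcpu_arch arch;

	/* Non-nested operation: walk_mmu designates the regular MMU. */
	arch.walk_mmu = &arch.mmu;

	/*
	 * The invariant the commit relies on: wherever the old code passed
	 * vcpu->arch.walk_mmu (or &vcpu->arch.mmu) into kvm_init_shadow_mmu()
	 * or kvm_init_shadow_ept_mmu(), it was passing the same address the
	 * callee can derive on its own, so the parameter carries no
	 * information.
	 */
	assert(arch.walk_mmu == &arch.mmu);
	return 0;
}
```

In the diff below, each removed context parameter is replaced by exactly
that computation, struct kvm_mmu *context = &vcpu->arch.mmu, at the top of
the function.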
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- arch/x86/kvm/mmu.c | 35 ++++++++++++++++++++++---------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8ddbcb570fce..d6d3d6f0ff1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3763,7 +3763,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = vcpu->arch.walk_mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->base_role.word = 0;
 	context->page_fault = tdp_page_fault;
@@ -3803,11 +3803,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	update_last_pte_bitmap(vcpu, context);
 }
 
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -3818,19 +3820,20 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	else
 		paging32_init_context(vcpu, context);
 
-	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
-	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
-	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
-	vcpu->arch.mmu.base_role.smep_andnot_wp
+	context->base_role.nxe = is_nx(vcpu);
+	context->base_role.cr4_pae = !!is_pae(vcpu);
+	context->base_role.cr0_wp = is_write_protection(vcpu);
+	context->base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-		bool execonly)
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
 {
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 
@@ -3851,11 +3854,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
-	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
-	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
-	vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
-	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
+	kvm_init_shadow_mmu(vcpu);
+	context->set_cr3 = kvm_x86_ops->set_cr3;
+	context->get_cr3 = get_cr3;
+	context->get_pdptr = kvm_pdptr_read;
+	context->inject_page_fault = kvm_inject_page_fault;
 }
 
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)