author     Paolo Bonzini <pbonzini@redhat.com>       2013-10-02 10:56:11 -0400
committer  Gleb Natapov <gleb@redhat.com>            2013-10-03 08:43:59 -0400
commit     d8d173dab2505e72b62882e5a580862e6ec1c06c (patch)
tree       32a93165eb561cf337edc2def307bf0bbf9af126 /arch/x86/kvm/mmu.c
parent     206260941fd4b6f25f28ecf4e267b2f9a0ba72d7 (diff)
KVM: mmu: remove uninteresting MMU "new_cr3" callbacks
The new_cr3 MMU callback has been a wrapper for mmu_free_roots since commit
e676505 (KVM: MMU: Force cr3 reload with two dimensional paging on mov
cr3 emulation, 2012-07-08).
The commit message mentioned that "mmu_free_roots() is somewhat of an overkill,
but fixing that is more complicated and will be done after this minimal fix".
One year has passed, and no one really felt the need to do a different fix.
Wrap the call with a kvm_mmu_new_cr3 function for clarity, but remove the
callback.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
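What follows the diffstat is the actual change; as a quick illustration of the pattern it applies, here is a minimal standalone C sketch (not kernel code): every per-context ->new_cr3 callback had become the same wrapper around mmu_free_roots(), so the function pointer is dropped and replaced by one directly-called helper. The names kvm_mmu_new_cr3, mmu_free_roots and struct kvm_vcpu mirror the kernel ones, but the struct body, printf and main() are simplified stand-ins for illustration only.

/*
 * Standalone sketch of the refactor: drop identical per-context
 * callbacks, keep a single named helper that callers invoke directly.
 */
#include <stdio.h>

struct kvm_vcpu { int id; };          /* stand-in for the real vcpu */

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	printf("vcpu %d: freeing MMU roots\n", vcpu->id);
}

/*
 * Before this commit, each paging mode installed its own callback,
 * all of them identical wrappers reached via context->new_cr3(vcpu):
 *
 *   static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { mmu_free_roots(vcpu); }
 *   static void paging_new_cr3(struct kvm_vcpu *vcpu)    { mmu_free_roots(vcpu); }
 *
 * After the commit there is one helper, called directly by the
 * mov-to-cr3 paths instead of through the function pointer.
 */
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };

	/* A CR3-write emulation path now calls the helper directly. */
	kvm_mmu_new_cr3(&vcpu);
	return 0;
}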
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  13
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c4d580ba347..dff856c8621a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,11 +2570,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	kvm_release_pfn_clean(pfn);
 }
 
-static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
-{
-	mmu_free_roots(vcpu);
-}
-
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
@@ -3427,7 +3422,6 @@ out_unlock:
 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu *context)
 {
-	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_page = nonpaging_sync_page;
@@ -3448,9 +3442,8 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
 
-static void paging_new_cr3(struct kvm_vcpu *vcpu)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
@@ -3666,7 +3659,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 	update_last_pte_bitmap(vcpu, context);
 
 	ASSERT(is_pae(vcpu));
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
@@ -3694,7 +3686,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
 
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_page = paging32_sync_page;
@@ -3717,7 +3708,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
 	context->base_role.word = 0;
-	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
@@ -3792,7 +3782,6 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 
 	context->nx = true;
-	context->new_cr3 = paging_new_cr3;
 	context->page_fault = ept_page_fault;
 	context->gva_to_gpa = ept_gva_to_gpa;
 	context->sync_page = ept_sync_page;