diff options
 arch/x86/kvm/mmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a62ba462972e..91d30695677b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1039,7 +1039,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
@@ -1048,7 +1048,6 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->role.direct)
 		free_page((unsigned long)sp->gfns);
 	kmem_cache_free(mmu_page_header_cache, sp);
-	kvm_mod_used_mmu_pages(kvm, -1);
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1655,6 +1654,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 		/* Count self */
 		ret++;
 		list_move(&sp->link, invalid_list);
+		kvm_mod_used_mmu_pages(kvm, -1);
 	} else {
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		kvm_reload_remote_mmus(kvm);
@@ -1678,7 +1678,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	do {
 		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
 		WARN_ON(!sp->role.invalid || sp->root_count);
-		kvm_mmu_free_page(kvm, sp);
+		kvm_mmu_free_page(sp);
 	} while (!list_empty(invalid_list));
 
 }
@@ -1704,8 +1704,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		}
+		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
 
@@ -3302,9 +3302,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
-		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
