Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/mmu.c              | 27
-rw-r--r--  arch/x86/kvm/mmu.h              |  3
3 files changed, 12 insertions, 20 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 02963684cd28..e01b72825564 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -367,7 +367,7 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_arch {
-	unsigned int n_free_mmu_pages;
+	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	atomic_t invlpg_counter;
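
The field rename above inverts the bookkeeping: instead of tracking how many shadow pages remain free, struct kvm_arch now tracks how many are in use. A minimal userspace sketch (the struct and numbers below are hypothetical stand-ins, not kernel code) of why no information is lost given n_max_mmu_pages:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the two counters kept in struct kvm_arch. */
struct counters {
	unsigned int n_used_mmu_pages;
	unsigned int n_max_mmu_pages;
};

int main(void)
{
	struct counters c = { .n_used_mmu_pages = 40, .n_max_mmu_pages = 100 };

	/* The old n_free_mmu_pages value is now derived: free = max - used. */
	unsigned int n_free = c.n_max_mmu_pages - c.n_used_mmu_pages;

	assert(n_free == 60);
	printf("used=%u max=%u free=%u\n",
	       c.n_used_mmu_pages, c.n_max_mmu_pages, n_free);
	return 0;
}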
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6979e7d1464e..ff39b85d7a4d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -980,7 +980,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->role.direct)
 		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
-	++kvm->arch.n_free_mmu_pages;
+	--kvm->arch.n_used_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1003,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->arch.n_free_mmu_pages;
+	++vcpu->kvm->arch.n_used_mmu_pages;
 	return sp;
 }
 
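With a used-page counter, the sense of every update flips: the allocation path increments and the free path decrements. A compilable sketch of that inversion (fake_kvm and the helper names are illustrative, not the kernel's):

#include <stdio.h>

struct fake_kvm { unsigned int n_used_mmu_pages; };

/* Mirrors kvm_mmu_alloc_page(): a new shadow page raises the count. */
static void sp_alloc(struct fake_kvm *kvm)
{
	++kvm->n_used_mmu_pages;	/* was: --n_free_mmu_pages */
}

/* Mirrors kvm_mmu_free_page(): releasing a shadow page lowers it. */
static void sp_free(struct fake_kvm *kvm)
{
	--kvm->n_used_mmu_pages;	/* was: ++n_free_mmu_pages */
}

int main(void)
{
	struct fake_kvm kvm = { .n_used_mmu_pages = 0 };

	sp_alloc(&kvm);
	sp_alloc(&kvm);
	sp_free(&kvm);
	printf("used=%u\n", kvm.n_used_mmu_pages);	/* prints used=1 */
	return 0;
}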
@@ -1689,41 +1689,32 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-	int used_pages;
 	LIST_HEAD(invalid_list);
-
-	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
-	used_pages = max(0, used_pages);
-
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if (used_pages > kvm_nr_mmu_pages) {
-		while (used_pages > kvm_nr_mmu_pages &&
+	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
-							       &invalid_list);
+			kvm_mmu_prepare_zap_page(kvm, page,
+						 &invalid_list);
 		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
-		kvm_nr_mmu_pages = used_pages;
-		kvm->arch.n_free_mmu_pages = 0;
 	}
-	else
-		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->arch.n_max_mmu_pages;
+		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
+	}
 
-	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
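The payoff of the rename shows in this hunk: because n_used_mmu_pages is kept current by the alloc/free paths, the function no longer reconstructs a local used_pages value or re-bases a free counter in an else branch. It just zaps until usage falls to the goal, clamps the goal, and stores the new maximum. A userspace sketch of that control flow (struct vm, zap_one() and change_mmu_pages() are hypothetical stand-ins):

#include <stdio.h>

struct vm { unsigned int n_used, n_max; };

/* Stand-in for kvm_mmu_prepare_zap_page() + commit: frees one page. */
static void zap_one(struct vm *vm)
{
	--vm->n_used;
}

static void change_mmu_pages(struct vm *vm, unsigned int goal)
{
	if (vm->n_used > goal) {
		while (vm->n_used > goal)
			zap_one(vm);
		goal = vm->n_used;	/* clamp, as the patch does */
	}
	vm->n_max = goal;
}

int main(void)
{
	struct vm vm = { .n_used = 50, .n_max = 100 };

	change_mmu_pages(&vm, 20);	/* shrink below current usage */
	printf("used=%u max=%u\n", vm.n_used, vm.n_max);	/* used=20 max=20 */
	return 0;
}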
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c3a689ae7df0..f05a03dfba4e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,7 +52,8 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
-	return kvm->arch.n_free_mmu_pages;
+	return kvm->arch.n_max_mmu_pages -
+		kvm->arch.n_used_mmu_pages;
 }
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
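
After the change, kvm_mmu_available_pages() computes headroom on demand instead of reading a stored free count. A one-line sketch of the same arithmetic (outside the kernel, with plain parameters instead of struct kvm):

/*
 * free = max - used; callers are assumed to keep used <= max,
 * otherwise the unsigned subtraction would wrap.
 */
static inline unsigned int available_pages(unsigned int n_max,
					   unsigned int n_used)
{
	return n_max - n_used;
}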