author     Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>	2013-03-21 06:34:27 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>	2013-03-21 18:45:01 -0400
commit     81f4f76bbc712a2dff8bb020057c554e285370e1 (patch)
tree       14951f49387558deb4258cce5af8c98df9c9f7af	/arch/x86/kvm/mmu.c
parent     7ddca7e43c8f28f9419da81a0e7730b66aa60fe9 (diff)
KVM: MMU: Rename kvm_mmu_free_some_pages() to make_mmu_pages_available()
The current name "kvm_mmu_free_some_pages" suggests a function that actually
frees some shadow pages, as one would expect from the name, but what the
function really does is make a minimum number of shadow pages,
KVM_MIN_FREE_MMU_PAGES, available: it does nothing when there are already
enough.
This patch renames the function to reflect that meaning better; while doing
the rename, the code of the wrapper function is inlined into the main body,
since the whole function will now be inlined into its only caller anyway.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
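
For context on the "wrapper function" mentioned above: before this patch,
kvm_mmu_free_some_pages() was a thin inline guard in front of
__kvm_mmu_free_some_pages(). A minimal sketch of that pre-patch wrapper is
shown below; it is not part of this diff, so its exact shape and location
(arch/x86/kvm/mmu.h) are assumptions.

/* Assumed shape of the pre-patch wrapper: call the slow path only when the
 * number of available shadow pages has dropped below KVM_MIN_FREE_MMU_PAGES.
 */
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

This is exactly the check that now appears, inverted into an early return
with likely(), at the top of make_mmu_pages_available() in the second hunk
of the diff below.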
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 38f34c5361f4..633e30cfbd63 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1501,12 +1501,14 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
+
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
-	kvm_mmu_free_some_pages(vcpu);
+	make_mmu_pages_available(vcpu);
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
@@ -4010,10 +4012,13 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
-void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
+	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
+		return;
+
 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
 		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
 			break;
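
Putting the two hunks together, the renamed function ends up reading roughly
as follows. The lines after the break statement lie below the context shown
in the diff, so the mmu_recycled counter and the final
kvm_mmu_commit_zap_page() call are assumptions based on the surrounding
kernel code of this period, not something this patch shows.

static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	/* Fast path: nothing to do while enough shadow pages are available. */
	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return;

	/* Zap the oldest shadow pages until KVM_REFILL_PAGES are available,
	 * or until there is nothing left to zap.
	 */
	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;
		++vcpu->kvm->stat.mmu_recycled;	/* assumed: not visible in the hunk above */
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);	/* assumed: below the shown context */
}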