aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2013-01-30 09:45:05 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2013-02-05 19:47:39 -0500
commit834be0d83f9451573e6fadb381fe0714211c7e90 (patch)
tree11a18d840145e96cc3b34f8b9891fe4df8f8cd5c /arch/x86/kvm/mmu.c
parenteb3fce87ccc5d38b1ad340f32e34abc09911fb83 (diff)
Revert "KVM: MMU: split kvm_mmu_free_page"
This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4. There is no user for kvm_mmu_isolate_page() any more. Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- arch/x86/kvm/mmu.c | 21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 42ba85c62fcb..0242a8a1b2e2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1461,28 +1461,14 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-/*
- * Remove the sp from shadow page cache, after call it,
- * we can not find this sp from the cache, and the shadow
- * page table is still valid.
- * It should be under the protection of mmu lock.
- */
-static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
-	if (!sp->role.direct)
-		free_page((unsigned long)sp->gfns);
-}
-
-/*
- * Free the shadow page table and the sp, we can do it
- * out of the protection of mmu lock.
- */
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
-{
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
+	if (!sp->role.direct)
+		free_page((unsigned long)sp->gfns);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
 
@@ -2126,7 +2112,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	do {
 		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
 		WARN_ON(!sp->role.invalid || sp->root_count);
-		kvm_mmu_isolate_page(sp);
 		kvm_mmu_free_page(sp);
 	} while (!list_empty(invalid_list));
 }