author	Izik Eidus <izike@qumranet.com>	2008-03-30 08:17:21 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:00:53 -0400
commit	3ee16c814511cd58f956b47b9c7654f57f674688 (patch)
tree	38eb024cd6292c85b57a8ea7b2a761e174db3884 /arch/x86/kvm/mmu.c
parent	3200f405a1e8e06c8634f11d33614455baa4e6be (diff)
KVM: MMU: allow the vm to shrink the kvm mmu shadow caches
Allow the Linux memory manager to reclaim memory in the kvm shadow cache.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	58
1 file changed, 56 insertions, 2 deletions
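For context, this patch plugs into the kernel's generic shrinker interface as it existed in this era (before the shrink_control rework): a shrinker registers a single .shrink callback, which the VM calls with nr_to_scan == 0 to ask how many objects are freeable, or with nr_to_scan > 0 to actually free up to that many; either way it returns the remaining object count. Below is a minimal sketch of that registration/teardown pattern. my_cache_count, my_cache_shrink, and the module hooks are hypothetical stand-ins; only struct shrinker, DEFAULT_SEEKS, register_shrinker(), and unregister_shrinker() are the real interfaces from <linux/mm.h>.

/*
 * Minimal sketch of the pre-shrink_control shrinker pattern used by
 * this patch. my_cache_count and my_cache_shrink are hypothetical;
 * the shrinker API calls are real.
 */
#include <linux/mm.h>
#include <linux/module.h>

static atomic_t my_cache_count = ATOMIC_INIT(0);	/* objects currently cached */

/*
 * Called by the VM under memory pressure: report the freeable count
 * when nr_to_scan is 0, otherwise free up to nr_to_scan objects.
 * Returns how many objects remain in the cache.
 */
static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	while (nr_to_scan-- > 0 && atomic_read(&my_cache_count) > 0)
		atomic_dec(&my_cache_count);	/* stands in for freeing one object */

	return atomic_read(&my_cache_count);
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,	/* relative cost of recreating an object */
};

static int __init my_init(void)
{
	register_shrinker(&my_shrinker);
	return 0;
}

static void __exit my_exit(void)
{
	unregister_shrinker(&my_shrinker);	/* stop the VM calling back into us */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");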
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c563283cb982..1594ee06c920 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1966,7 +1966,53 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_module_exit(void)
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+	struct kvm_mmu_page *page;
+
+	page = container_of(kvm->arch.active_mmu_pages.prev,
+			    struct kvm_mmu_page, link);
+	kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	struct kvm *kvm;
+	struct kvm *kvm_freed = NULL;
+	int cache_count = 0;
+
+	spin_lock(&kvm_lock);
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		int npages;
+
+		spin_lock(&kvm->mmu_lock);
+		npages = kvm->arch.n_alloc_mmu_pages -
+			 kvm->arch.n_free_mmu_pages;
+		cache_count += npages;
+		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+			kvm_mmu_remove_one_alloc_mmu_page(kvm);
+			cache_count--;
+			kvm_freed = kvm;
+		}
+		nr_to_scan--;
+
+		spin_unlock(&kvm->mmu_lock);
+	}
+	if (kvm_freed)
+		list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+	spin_unlock(&kvm_lock);
+
+	return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+	.shrink = mmu_shrink,
+	.seeks = DEFAULT_SEEKS * 10,
+};
+
+void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
@@ -1976,6 +2022,12 @@ void kvm_mmu_module_exit(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+void kvm_mmu_module_exit(void)
+{
+	mmu_destroy_caches();
+	unregister_shrinker(&mmu_shrinker);
+}
+
 int kvm_mmu_module_init(void)
 {
 	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -1995,10 +2047,12 @@ int kvm_mmu_module_init(void)
 	if (!mmu_page_header_cache)
 		goto nomem;
 
+	register_shrinker(&mmu_shrinker);
+
 	return 0;
 
 nomem:
-	kvm_mmu_module_exit();
+	mmu_destroy_caches();
 	return -ENOMEM;
 }
 
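Two details of mmu_shrink are worth noting. First, the !kvm_freed test means at most one shadow page is zapped per invocation, regardless of nr_to_scan, presumably to keep mmu_lock hold times short; cache_count still reports the total across all VMs so the VM knows how much remains reclaimable. Second, list_move_tail() rotates the VM that just gave up a page to the end of vm_list, so repeated shrink calls spread the reclaim cost across guests rather than draining one VM's shadow cache. The seeks value of DEFAULT_SEEKS * 10 marks shadow pages as expensive to recreate, making the VM shrink this cache only under sustained memory pressure.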