path: root/drivers/kvm/mmu.c
author	Izik Eidus <izike@qumranet.com>	2007-10-02 12:52:55 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:52:50 -0500
commit	82ce2c96831f049a37118733ced5c8f7c8848102 (patch)
tree	609fed010ebbb1ced6f0f24698148e69a72da5fd /drivers/kvm/mmu.c
parent	195aefde9cc2cee38dd54ef92a866721fba4413e (diff)
KVM: Allow dynamic allocation of the mmu shadow cache size
The user is now able to set how many mmu pages will be allocated to the guest.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
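For context, the sketch below shows how a user-space VMM might exercise this knob. It assumes the limit is exposed through the KVM_SET_NR_MMU_PAGES vm ioctl, which is wired up outside this file; the ioctl usage shown here, the fixed value of 256 pages, and the trimmed error handling are illustrative assumptions, not part of this diff.

/*
 * Minimal user-space sketch, assuming the KVM_SET_NR_MMU_PAGES vm ioctl
 * is available; error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* Cap the shadow MMU cache at 256 pages; the kernel side ends up
	 * in kvm_mmu_change_mmu_pages(), zapping or freeing pages to match. */
	if (ioctl(vm_fd, KVM_SET_NR_MMU_PAGES, 256) < 0)
		perror("KVM_SET_NR_MMU_PAGES");

	return 0;
}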
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 72757db15065..6cda1feb9a95 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -747,6 +747,40 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
+/*
+ * Changing the number of mmu pages allocated to the vm
+ * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
+ */
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+{
+	/*
+	 * If we set the number of mmu pages to be smaller than the
+	 * number of active pages, we must free some mmu pages before we
+	 * change the value
+	 */
+
+	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	    kvm_nr_mmu_pages) {
+		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
+				       - kvm->n_free_mmu_pages;
+
+		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+			struct kvm_mmu_page *page;
+
+			page = container_of(kvm->active_mmu_pages.prev,
+					    struct kvm_mmu_page, link);
+			kvm_mmu_zap_page(kvm, page);
+			n_used_mmu_pages--;
+		}
+		kvm->n_free_mmu_pages = 0;
+	}
+	else
+		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm->n_alloc_mmu_pages;
+
+	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+}
+
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	unsigned index;
@@ -1297,8 +1331,10 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 	ASSERT(vcpu);
 
-	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
-
+	if (vcpu->kvm->n_requested_mmu_pages)
+		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	else
+		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
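As a reading aid for the accounting above, the following user-space model reproduces the same arithmetic: pages in use are n_alloc_mmu_pages - n_free_mmu_pages, a shrink below current usage ends with zero free slots (the kernel zaps pages from the tail of active_mmu_pages until usage fits), and any other change simply moves the free count by the difference in limits. This is a toy sketch of the logic in kvm_mmu_change_mmu_pages(), not kernel code.

#include <stdio.h>

/* Toy model of the shadow-page accounting: n_alloc is the current
 * limit, n_free the unused slots, so pages in use = n_alloc - n_free. */
static void change_mmu_pages(int *n_alloc, int *n_free, int new_limit)
{
	int used = *n_alloc - *n_free;

	if (used > new_limit) {
		/* Shrink below current usage: the kernel zaps
		 * (used - new_limit) active pages, after which every
		 * remaining slot is occupied. */
		*n_free = 0;
	} else {
		/* Grow, or shrink within the free headroom: the free
		 * count moves by the difference in limits. */
		*n_free += new_limit - *n_alloc;
	}
	*n_alloc = new_limit;
}

int main(void)
{
	int n_alloc = 1024, n_free = 200;	/* 824 pages in use */

	change_mmu_pages(&n_alloc, &n_free, 512);	/* shrink below usage */
	printf("alloc=%d free=%d used=%d\n", n_alloc, n_free, n_alloc - n_free);

	change_mmu_pages(&n_alloc, &n_free, 2048);	/* grow */
	printf("alloc=%d free=%d used=%d\n", n_alloc, n_free, n_alloc - n_free);
	return 0;
}

The shrink path setting n_free_mmu_pages to 0 reflects that, once usage has been zapped down to the new limit, every surviving shadow-page slot is occupied.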