aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorZhang Xiantao <xiantao.zhang@intel.com>2007-11-20 00:11:38 -0500
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:53:09 -0500
commit3ad82a7e874c5d6c4045090cc01d7794dd9eb21c (patch)
tree94aabdff667daa7de80b61dd7d0233af21a54fa4
parent6226686954c4cce3d63ffe1777e60360fcbf0b83 (diff)
KVM: Recalculate mmu pages needed for every memory region change
Instead of incrementally changing the mmu cache size for every memory slot operation, recalculate it from scratch. This is simpler and safer. Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--drivers/kvm/kvm_main.c21
-rw-r--r--drivers/kvm/mmu.c19
-rw-r--r--drivers/kvm/x86.h1
3 files changed, 24 insertions, 17 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 729573b844e5..93ecafbfb1b6 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -333,26 +333,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
333 if (mem->slot >= kvm->nmemslots) 333 if (mem->slot >= kvm->nmemslots)
334 kvm->nmemslots = mem->slot + 1; 334 kvm->nmemslots = mem->slot + 1;
335 335
336 *memslot = new;
337
336 if (!kvm->n_requested_mmu_pages) { 338 if (!kvm->n_requested_mmu_pages) {
337 unsigned int n_pages; 339 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
338 340 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
339 if (npages) {
340 n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
341 kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
342 n_pages);
343 } else {
344 unsigned int nr_mmu_pages;
345
346 n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
347 nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
348 nr_mmu_pages = max(nr_mmu_pages,
349 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
350 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
351 }
352 } 341 }
353 342
354 *memslot = new;
355
356 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 343 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
357 kvm_flush_remote_tlbs(kvm); 344 kvm_flush_remote_tlbs(kvm);
358 345
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4624f3789b9a..101cd5377a89 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1535,6 +1535,25 @@ nomem:
1535 return -ENOMEM; 1535 return -ENOMEM;
1536} 1536}
1537 1537
1538/*
1539 * Calculate mmu pages needed for kvm.
1540 */
1541unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1542{
1543 int i;
1544 unsigned int nr_mmu_pages;
1545 unsigned int nr_pages = 0;
1546
1547 for (i = 0; i < kvm->nmemslots; i++)
1548 nr_pages += kvm->memslots[i].npages;
1549
1550 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1551 nr_mmu_pages = max(nr_mmu_pages,
1552 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1553
1554 return nr_mmu_pages;
1555}
1556
1538#ifdef AUDIT 1557#ifdef AUDIT
1539 1558
1540static const char *audit_msg; 1559static const char *audit_msg;
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 90b791bf6287..71f2477d03fd 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -236,6 +236,7 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
236int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); 236int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
237void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); 237void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
238void kvm_mmu_zap_all(struct kvm *kvm); 238void kvm_mmu_zap_all(struct kvm *kvm);
239unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
239void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 240void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
240 241
241enum emulation_result { 242enum emulation_result {