path: root/arch/x86
author    Marcelo Tosatti <mtosatti@redhat.com>  2009-07-22 12:05:49 -0400
committer Avi Kivity <avi@redhat.com>  2009-08-05 06:59:43 -0400
commit    025dbbf36a7680bffe54d9dcbf0a8bc01a7cbd10 (patch)
tree      17ae3afc86e2cbd71101b136933432928e60b7b7 /arch/x86
parent    4b656b1202498184a0ecef86b3b89ff613b9c6ab (diff)
KVM: MMU: handle n_free_mmu_pages > n_alloc_mmu_pages in kvm_mmu_change_mmu_pages
kvm_mmu_change_mmu_pages mishandles the case where n_alloc_mmu_pages is smaller than n_free_mmu_pages, by not checking whether the result of the subtraction is negative. It is a valid condition which can occur when a large number of pages have recently been freed.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
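A minimal standalone sketch of the failure mode the patch guards against, not the kernel code itself: it assumes the page counters are unsigned (as in the kvm arch state), and the names n_alloc, n_free, limit and used are illustrative only. When more pages are free than allocated, the difference comes out negative, and comparing that negative int against an unsigned limit promotes it to a huge unsigned value, so the zap loop would run when it should not; clamping, as the patch does with max(0, used_pages), avoids that.

#include <stdio.h>

int main(void)
{
        unsigned int n_alloc = 40, n_free = 64;   /* burst of frees: free > alloc */
        unsigned int limit = 32;                  /* requested mmu page count */

        int used = n_alloc - n_free;              /* -24 on typical platforms */

        if (used > limit)                         /* int vs unsigned: -24 compares as huge */
                printf("unclamped: would zap pages (used=%d)\n", used);

        used = used > 0 ? used : 0;               /* mirrors the patch's max(0, used_pages) */
        if (used > limit)
                printf("clamped: would zap pages\n");
        else
                printf("clamped: nothing to do (used=%d)\n", used);

        return 0;
}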
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/mmu.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7030b5f911bf..49a10d008300 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1407,24 +1407,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
+	int used_pages;
+
+	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+	used_pages = max(0, used_pages);
+
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
-	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
-				       - kvm->arch.n_free_mmu_pages;
-
-		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+	if (used_pages > kvm_nr_mmu_pages) {
+		while (used_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
-			n_used_mmu_pages--;
+			used_pages--;
 		}
 		kvm->arch.n_free_mmu_pages = 0;
 	}