author    Avi Kivity <avi@redhat.com>    2010-05-10 05:09:56 -0400
committer Avi Kivity <avi@redhat.com>    2010-08-01 03:35:41 -0400
commit    f0f5933a1626c8df7b0bfd227819c66320fb4f0f (patch)
tree      7ab29539ca98bf11aa3affdfb66d1f2aa55b0214
parent    6d77dbfc88e37c9efd5c5dd18445cfe819ae17ea (diff)
KVM: MMU: Fix free memory accounting race in mmu_alloc_roots()
We drop the mmu lock between freeing memory and allocating the roots; this
allows some other vcpu to sneak in and allocate memory.

While the race is benign (resulting only in temporary overallocation, not oom)
it is simple and easy to fix by moving the freeing close to the allocation.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
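For illustration, a minimal user-space sketch of the locking pattern this patch
changes. The pthread mutex stands in for kvm->mmu_lock, and free_some_pages()/
alloc_root() are hypothetical helpers standing in for kvm_mmu_free_some_pages()
and kvm_mmu_get_page(); this is not the real KVM code, only the before/after
ordering of the critical sections.

    /*
     * Illustrative sketch only, not kernel code: a pthread mutex stands in
     * for kvm->mmu_lock; free_some_pages() and alloc_root() are hypothetical
     * placeholders for the real MMU helpers.
     */
    #include <pthread.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

    static void free_some_pages(void) { /* evict pages to keep a free reserve */ }
    static void alloc_root(void)      { /* consume pages for the new root */ }

    /* Before the patch: freeing and allocating happen under separate lock
     * holds, so another vcpu can slip in and consume the freed pages in the
     * window between the two critical sections. */
    static void load_roots_before(void)
    {
            pthread_mutex_lock(&mmu_lock);
            free_some_pages();
            pthread_mutex_unlock(&mmu_lock);
            /* window: another thread may allocate here */
            pthread_mutex_lock(&mmu_lock);
            alloc_root();
            pthread_mutex_unlock(&mmu_lock);
    }

    /* After the patch: free immediately before allocating, under the same
     * lock hold, so nothing can run in between. */
    static void load_roots_after(void)
    {
            pthread_mutex_lock(&mmu_lock);
            free_some_pages();
            alloc_root();
            pthread_mutex_unlock(&mmu_lock);
    }

    int main(void)
    {
            load_roots_before();
            load_roots_after();
            return 0;
    }

Either ordering ends up with a root allocated; the difference is only whether
another vcpu can grab the just-freed pages first, which is why the race is
benign but still worth closing.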
-rw-r--r--    arch/x86/kvm/mmu.c    5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4a02dee1f2b5..d7aebafffdfe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2094,6 +2094,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = 0;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
+		kvm_mmu_free_some_pages(vcpu->kvm);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
@@ -2124,6 +2125,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = i << 30;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
+		kvm_mmu_free_some_pages(vcpu->kvm);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
@@ -2496,9 +2498,6 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		goto out;
-	spin_lock(&vcpu->kvm->mmu_lock);
-	kvm_mmu_free_some_pages(vcpu);
-	spin_unlock(&vcpu->kvm->mmu_lock);
 	r = mmu_alloc_roots(vcpu);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	mmu_sync_roots(vcpu);