author    Avi Kivity <avi@qumranet.com>    2007-12-31 08:27:49 -0500
committer Avi Kivity <avi@qumranet.com>    2008-01-30 11:01:21 -0500
commit    eb787d10af8045dd00d4d4c9a8e90fa495f1b0c1 (patch)
tree      6594a4f1ba3718d01a8682aeadb31a0f61ae6f86  /arch/x86/kvm/paging_tmpl.h
parent    aaee2c94f7a1f7726e360a6cfb40173bd552bcff (diff)
KVM: MMU: Move kvm_free_some_pages() into critical section
If some other cpu steals mmu pages between our check and an attempt to allocate, we can run out of mmu pages. Fix by moving the check into the same critical section as the allocation.

Signed-off-by: Avi Kivity <avi@qumranet.com>
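For illustration only, a minimal userspace analogue of the race this patch closes, using pthreads instead of the kernel's spinlock; the pool structure, MIN_FREE, reclaim_some_pages() and the alloc_* helpers are hypothetical stand-ins, not KVM code:

#include <pthread.h>
#include <stdbool.h>

#define MIN_FREE 4                      /* stand-in for the mmu page threshold */

struct pool {
	pthread_mutex_t lock;
	int free_pages;
};

static void reclaim_some_pages(struct pool *p)
{
	p->free_pages += MIN_FREE;      /* stand-in for kvm_mmu_free_some_pages() */
}

/* Racy: another thread can drain the pool between the two critical
 * sections, so the allocation may find it empty. */
static bool alloc_racy(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->free_pages < MIN_FREE)   /* check in its own critical section */
		reclaim_some_pages(p);
	pthread_mutex_unlock(&p->lock);

	/* another thread may take every free page right here */

	pthread_mutex_lock(&p->lock);
	bool ok = p->free_pages > 0;
	if (ok)
		p->free_pages--;
	pthread_mutex_unlock(&p->lock);
	return ok;
}

/* Fixed: check and allocation share one critical section, mirroring the
 * patch, which calls kvm_mmu_free_some_pages() under mmu_lock. */
static bool alloc_fixed(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->free_pages < MIN_FREE)   /* check inside the same lock hold */
		reclaim_some_pages(p);
	bool ok = p->free_pages > 0;
	if (ok)
		p->free_pages--;
	pthread_mutex_unlock(&p->lock);
	return ok;
}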
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a35b83a4fef2..349920556be3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -402,6 +402,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,