author		Marcelo Tosatti <mtosatti@redhat.com>	2007-12-20 19:18:26 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:21 -0500
commit		aaee2c94f7a1f7726e360a6cfb40173bd552bcff (patch)
tree		e9066ae5509c349bfd6a187e85d52cc476e16a12 /arch/x86/kvm/paging_tmpl.h
parent		d7824fff896a1698a07a8046dc362f4500c302f7 (diff)
KVM: MMU: Switch to mmu spinlock
Convert the synchronization of the shadow handling to a separate mmu_lock
spinlock.

Also guard fetch() by mmap_sem in read-mode to protect against alias
and memslot changes.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
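In effect the patch leaves the page fault path with a two-level locking scheme:
mmap_sem is held for read (the matching down_read() sits outside the hunks
shown) across the guest page table walk, gfn_to_page() and FNAME(fetch)(), so
alias and memslot lookups stay stable, while the new mmu_lock spinlock covers
only the non-sleeping shadow page table update that previously ran under
kvm->lock. Below is a minimal C sketch of that ordering, with error handling
and the walker details elided; apart from mmap_sem, mmu_lock and gfn_to_page()
the names are illustrative, not taken from the kernel.

/* Illustrative outline of the lock ordering only -- not the real handler. */
static int shadow_fault_outline(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);   /* may sleep; keeps aliases and
						memslots stable (per the commit
						message) */

	page = gfn_to_page(vcpu->kvm, gfn);  /* may sleep, so done before
						taking mmu_lock */

	spin_lock(&vcpu->kvm->mmu_lock);     /* replaces mutex_lock(&kvm->lock) */
	/* ... shadow page table update (FNAME(fetch)) runs under the spinlock ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&current->mm->mmap_sem);     /* every return path drops it */
	return 0;
}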
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	10
1 file changed, 6 insertions, 4 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 3d7846ba26e1..a35b83a4fef2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -387,7 +387,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 */
 	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
 			     fetch_fault);
-	up_read(&current->mm->mmap_sem);
 
 	/*
 	 * The page is not mapped by the guest. Let the guest handle it.
@@ -396,12 +395,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		up_read(&current->mm->mmap_sem);
 		return 0;
 	}
 
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
@@ -414,13 +414,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
 	if (shadow_pte && is_io_pte(*shadow_pte)) {
-		mutex_unlock(&vcpu->kvm->lock);
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 
 	return write_pt;
 }