author	Marcelo Tosatti <mtosatti@redhat.com>	2007-12-20 19:18:22 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:20 -0500
commit	10589a4699bb978c781ce73bbae8ca942c5250c9 (patch)
tree	5585ed87fff0a2ba259fcc6f998022481da75f68	/arch/x86/kvm/paging_tmpl.h
parent	774ead3ad9bcbc05ef6aaebb9bdf8b4c3126923b (diff)
KVM: MMU: Concurrent guest walkers
Do not hold the kvm->lock mutex across the entire pagefault code; acquire it
only in places where it is necessary, such as mmu hash list, active list,
rmap and parent pte handling.  Allow concurrent guest walkers by switching
walk_addr() to use mmap_sem in read mode, and get rid of the lockless
__gfn_to_page().

[avi: move kvm_mmu_pte_write() locking inside the function]
[avi: add locking for real mode]
[avi: fix cmpxchg locking]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
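The sketch below is a minimal user-space analogue of the locking pattern this
commit describes, not kernel code: a pthread rwlock stands in for mmap_sem
(taken for read so several "guest walkers" can run concurrently) and a mutex
stands in for kvm->lock (held only around the shared shadow-structure update).
All names and the update itself are hypothetical placeholders.

/*
 * Hypothetical analogue of the commit's locking pattern (assumption:
 * plain C + pthreads standing in for the kernel primitives).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER; /* ~ current->mm->mmap_sem */
static pthread_mutex_t  kvm_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ vcpu->kvm->lock */
static unsigned long shadow_updates;                           /* ~ shared shadow MMU state */

static void *page_fault(void *arg)
{
	/* Walk the "guest page tables" under the read side only,
	 * so multiple faulting vcpus can walk concurrently. */
	pthread_rwlock_rdlock(&mmap_sem);
	/* ... the FNAME(walk_addr)() equivalent would run here ... */
	pthread_rwlock_unlock(&mmap_sem);

	/* Touch shared shadow structures under the mutex only. */
	pthread_mutex_lock(&kvm_lock);
	shadow_updates++;	/* ~ FNAME(fetch)() updating shadow ptes */
	pthread_mutex_unlock(&kvm_lock);
	return NULL;
}

int main(void)
{
	pthread_t walkers[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&walkers[i], NULL, page_fault, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(walkers[i], NULL);
	printf("shadow updates: %lu\n", shadow_updates);
	return 0;
}

The point of the split is visible in the diff below: the walk runs under
mmap_sem read-side only, and kvm->lock is taken just before the shadow pte is
instantiated and dropped on every exit path.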
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 56b88f7e83ef..7f83f5557d5e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -368,11 +368,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
 	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
 			     fetch_fault);
+	up_read(&current->mm->mmap_sem);
 
 	/*
 	 * The page is not mapped by the guest.  Let the guest handle it.
@@ -384,6 +386,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		return 0;
 	}
 
+	mutex_lock(&vcpu->kvm->lock);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
@@ -395,11 +398,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	/*
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
-	if (shadow_pte && is_io_pte(*shadow_pte))
+	if (shadow_pte && is_io_pte(*shadow_pte)) {
+		mutex_unlock(&vcpu->kvm->lock);
 		return 1;
+	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
+	mutex_unlock(&vcpu->kvm->lock);
 
 	return write_pt;
 }