path: root/virt/kvm/kvm_main.c
author    Marcelo Tosatti <mtosatti@redhat.com>  2007-12-20 19:18:22 -0500
committer Avi Kivity <avi@qumranet.com>          2008-01-30 11:01:20 -0500
commit    10589a4699bb978c781ce73bbae8ca942c5250c9 (patch)
tree      5585ed87fff0a2ba259fcc6f998022481da75f68 /virt/kvm/kvm_main.c
parent    774ead3ad9bcbc05ef6aaebb9bdf8b4c3126923b (diff)
KVM: MMU: Concurrent guest walkers
Do not hold the kvm->lock mutex across the entire pagefault code; only
acquire it in places where it is necessary, such as mmu hash list,
active list, rmap and parent pte handling.

Allow concurrent guest walkers by switching walk_addr() to use
mmap_sem in read mode.

And get rid of the lockless __gfn_to_page.

[avi: move kvm_mmu_pte_write() locking inside the function]
[avi: add locking for real mode]
[avi: fix cmpxchg locking]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
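For context, a minimal sketch of the caller-side pattern this implies
(example_lookup() is a hypothetical helper, not part of this patch):
gfn_to_page() now documents that current->mm->mmap_sem must be held,
so a caller that is not already under mmap_sem takes it in read mode
around the call, exactly as the removed wrapper used to do internally:

/* Hypothetical caller, for illustration only. */
static struct page *example_lookup(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}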
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  22
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4026d7d64296..678e80561b74 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -227,7 +227,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
  *
- * Must be called holding kvm->lock.
+ * Must be called holding mmap_sem for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
@@ -338,9 +338,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 {
 	int r;
 
-	mutex_lock(&kvm->lock);
+	down_write(&current->mm->mmap_sem);
 	r = __kvm_set_memory_region(kvm, mem, user_alloc);
-	mutex_unlock(&kvm->lock);
+	up_write(&current->mm->mmap_sem);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -456,7 +456,7 @@ static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 /*
  * Requires current->mm->mmap_sem to be held
  */
-static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct page *page[1];
 	unsigned long addr;
@@ -481,17 +481,6 @@ static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
 	return page[0];
 }
 
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
-	struct page *page;
-
-	down_read(&current->mm->mmap_sem);
-	page = __gfn_to_page(kvm, gfn);
-	up_read(&current->mm->mmap_sem);
-
-	return page;
-}
-
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
@@ -977,8 +966,7 @@ static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
 		return VM_FAULT_SIGBUS;
-	/* current->mm->mmap_sem is already held so call lockless version */
-	page = __gfn_to_page(kvm, vmf->pgoff);
+	page = gfn_to_page(kvm, vmf->pgoff);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
 		return VM_FAULT_SIGBUS;