author    Izik Eidus <izike@qumranet.com>  2008-02-10 11:04:15 -0500
committer Avi Kivity <avi@qumranet.com>    2008-03-04 08:19:40 -0500
commit    72dc67a69690288538142df73a7e3ac66fea68dc (patch)
tree      f40cc5ef0c66686a469977fd438e5b6786f16280 /arch/x86/kvm/paging_tmpl.h
parent    c7ac679c160db864810920df61a6ed14275011aa (diff)
KVM: remove the usage of the mmap_sem for the protection of the memory slots.
This patch replaces the mmap_sem lock for the memory slots with a new kvm-private lock. It is needed because, until now, there were cases where kvm accessed user memory while holding the mmap semaphore.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
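In rough outline, the resulting locking scheme looks like the sketch below. This is illustrative only, not part of the diff: it assumes slots_lock is the rw_semaphore this series adds to struct kvm (the field itself is introduced outside this file). slots_lock now protects lookups in the memory-slot array, while mmap_sem is narrowed to the window where user memory is actually touched:

	/* Illustrative sketch, not code from this patch: memory-slot
	 * readers take kvm->slots_lock; mmap_sem is held only around
	 * the user-memory access itself (gfn_to_page).
	 */
	down_read(&vcpu->kvm->slots_lock);	/* memslots stable from here */

	down_read(&current->mm->mmap_sem);	/* only for the user-memory access */
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* ... map the page into the shadow page tables ... */

	up_read(&vcpu->kvm->slots_lock);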
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
 arch/x86/kvm/paging_tmpl.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba8608fe0f..2009c6e9dc4d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
+	up_read(&current->mm->mmap_sem);
+
 	table = kmap_atomic(page, KM_USER0);
 
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -378,7 +381,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -392,11 +395,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +418,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 */
 	if (shadow_pte && is_io_pte(*shadow_pte)) {
 		spin_unlock(&vcpu->kvm->mmu_lock);
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return write_pt;
 }