commit 72dc67a69690288538142df73a7e3ac66fea68dc
Author:    Izik Eidus <izike@qumranet.com>   2008-02-10 11:04:15 -0500
Committer: Avi Kivity <avi@qumranet.com>     2008-03-04 08:19:40 -0500
Tree:      f40cc5ef0c66686a469977fd438e5b6786f16280
Parent:    c7ac679c160db864810920df61a6ed14275011aa
KVM: remove the usage of the mmap_sem for the protection of the memory slots.
This patch replaces the mmap_sem lock used to protect the memory slots with a new kvm-private lock. It is needed because, until now, there were cases where kvm accessed user memory while holding the mmap semaphore.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
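In miniature, the pattern this patch moves toward: memslot lookups are guarded by a VM-private reader/writer semaphore, while mmap_sem is taken only around the calls that actually touch user memory (gfn_to_page() and friends). A hedged sketch of the before/after, using the slots_lock field visible in the diff below; the surrounding control flow is illustrative, not a quote of the final code:

	/* Before: mmap_sem protected the slot array *and* could be
	 * held across code that touches user memory. */
	down_read(&current->mm->mmap_sem);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);        /* slot lookup */
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);  /* user memory */
	up_read(&current->mm->mmap_sem);

	/* After: the slot array gets its own rwsem; mmap_sem shrinks
	 * to just the user-memory access. */
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);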
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8efdcdbebb03..26037106ad19 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -876,11 +876,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
+	struct page *page;
+
 	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
-	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	up_read(&current->mm->mmap_sem);
+
+	return page;
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
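Note that gva_to_page() still returns a referenced struct page (or NULL), so its callers are untouched by this hunk; they remain responsible for dropping the reference. A hypothetical caller, for illustration only (kvm_release_page_clean() is the existing KVM helper for releasing an unmodified page):

	struct page *page = gva_to_page(vcpu, gva);

	if (!page)
		return -EFAULT;
	/* ... read or map the guest page ... */
	kvm_release_page_clean(page);	/* drop the reference gfn_to_page() took */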
@@ -1020,15 +1027,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	struct page *page;
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __nonpaging_map(vcpu, v, write, gfn, page);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
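The hunk above also establishes the lock nesting the patch uses: the new slots_lock is outermost, mmap_sem is held only across gfn_to_page() and is released before the mmu_lock spinlock is taken, rather than being held across it as before. Schematically (a sketch of the nesting, not new code):

	down_read(&vcpu->kvm->slots_lock);        /* memslots stable from here  */
	down_read(&current->mm->mmap_sem);        /* user-memory access only    */
	/* ... gfn_to_page() ... */
	up_read(&current->mm->mmap_sem);
	spin_lock(&vcpu->kvm->mmu_lock);          /* innermost: shadow MMU state */
	/* ... */
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&vcpu->kvm->slots_lock);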
@@ -1362,6 +1372,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn;
 	int r;
 	u64 gpte = 0;
+	struct page *page;
 
 	if (bytes != 4 && bytes != 8)
 		return;
@@ -1389,6 +1400,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	if (!is_present_pte(gpte))
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
+
 	vcpu->arch.update_pte.gfn = gfn;
 	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
 }
@@ -1496,9 +1512,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);