path: root/arch/x86/kvm/mmu.c
author	Marcelo Tosatti <marcelo@kvack.org>	2008-03-29 19:17:59 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:00:52 -0400
commit	3200f405a1e8e06c8634f11d33614455baa4e6be (patch)
tree	806116d2495dd7fd93b5c0db98a72fe4fa854787 /arch/x86/kvm/mmu.c
parent	25c5f225beda4fbea878ed8b6203ab4ecc7de2d1 (diff)
KVM: MMU: unify slots_lock usage
Unify slots_lock acquisition around vcpu_run(). This is simpler and less
error-prone. Also fix some callsites that were not grabbing the lock
properly.

[avi: drop slots_lock while in guest mode to avoid holding the lock for
indefinite periods]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
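For context, the locking discipline the message describes can be sketched
roughly as follows. This is a minimal illustration only, not the patch
itself: __vcpu_run() stands in for the real run loop in arch/x86/kvm/x86.c,
and enter_guest() and handle_exit() are hypothetical helpers.

/* Sketch: slots_lock is taken once around the run loop, so individual
 * MMU paths (as in the diff below) no longer take it themselves. It is
 * dropped across guest entry because the guest can run for an unbounded
 * time and writers such as memslot updates must not be starved.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	down_read(&vcpu->kvm->slots_lock);
	for (;;) {
		/* slots_lock must not be held while in guest mode */
		up_read(&vcpu->kvm->slots_lock);
		enter_guest(vcpu);			/* hypothetical */
		down_read(&vcpu->kvm->slots_lock);

		r = handle_exit(vcpu, kvm_run);		/* hypothetical */
		if (r <= 0)
			break;
	}
	up_read(&vcpu->kvm->slots_lock);
	return r;
}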
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6fc342194dda..c563283cb982 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1204,8 +1204,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	struct page *page;
 
-	down_read(&vcpu->kvm->slots_lock);
-
 	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
@@ -1218,7 +1216,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	/* mmio */
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
@@ -1228,7 +1225,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 			 PT32E_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -1376,9 +1372,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		largepage = 1;
 	}
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -1386,7 +1382,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
 			 largepage, gfn, page, TDP_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
 
 	return r;
 }
@@ -1808,9 +1803,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2063,7 +2056,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;
 
-	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
+	if (!emulator_write_phys(vcpu, addr, &value, bytes))
 		return -EFAULT;
 
 	return 1;
@@ -2127,7 +2120,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 	int r;
 	struct kvm_pv_mmu_op_buffer buffer;
 
-	down_read(&vcpu->kvm->slots_lock);
 	down_read(&current->mm->mmap_sem);
 
 	buffer.ptr = buffer.buf;
@@ -2150,7 +2142,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 out:
 	*ret = buffer.processed;
 	up_read(&current->mm->mmap_sem);
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
 