author     Avi Kivity <avi@qumranet.com>  2007-12-30 05:29:05 -0500
committer  Avi Kivity <avi@qumranet.com>  2008-01-30 11:01:21 -0500
commit     d7824fff896a1698a07a8046dc362f4500c302f7 (patch)
tree       249e23ec224bc621bea1ef24fa83f5a749d6b35b
parent     7ec54588210df29ea637e6054489bc942c0ef371 (diff)
KVM: MMU: Avoid calling gfn_to_page() in mmu_set_spte()
Since gfn_to_page() is a sleeping function, and we want to make the core mmu
spinlocked, we need to pass the page from the walker context (which can sleep)
to the shadow context (which cannot).

[marcelo: avoid recursive locking of mmap_sem]

Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  arch/x86/kvm/mmu.c          | 55
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  | 23
-rw-r--r--  include/asm-x86/kvm_host.h  |  5
3 files changed, 73 insertions(+), 10 deletions(-)
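The core idea of the patch, sketched below as a small stand-alone user-space model: do the sleeping page lookup in the walker/caller context before any lock is taken, then hand the already-pinned page to the non-sleeping shadow-MMU path. The names in the sketch (lookup_page, set_spte_locked, release_page, handle_fault) are illustrative stand-ins, not KVM symbols, and the pthread mutex merely stands in for the future mmu spinlock.

#include <pthread.h>
#include <stdio.h>

struct page { int pinned; };

static struct page pages[16];
static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER;

/* May sleep: stands in for gfn_to_page(), which can take mmap_sem. */
static struct page *lookup_page(unsigned long gfn)
{
        struct page *p = &pages[gfn % 16];
        p->pinned = 1;
        return p;
}

/* Stands in for kvm_release_page_clean(). */
static void release_page(struct page *p)
{
        p->pinned = 0;
}

/* Must not sleep: models mmu_set_spte() after this patch, which only
 * receives an already-pinned page instead of looking it up itself. */
static void set_spte_locked(unsigned long gfn, struct page *p)
{
        printf("mapping gfn %lu -> page %p\n", gfn, (void *)p);
}

static void handle_fault(unsigned long gfn)
{
        struct page *p = lookup_page(gfn);      /* walker context: may sleep */

        pthread_mutex_lock(&shadow_lock);       /* models the future mmu spinlock */
        set_spte_locked(gfn, p);                /* shadow context: no sleeping calls */
        pthread_mutex_unlock(&shadow_lock);

        release_page(p);                        /* drop the pin taken above */
}

int main(void)
{
        handle_fault(3);
        return 0;
}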
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3b91227969a5..c0b757be7b99 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,11 +890,10 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                          unsigned pt_access, unsigned pte_access,
                          int user_fault, int write_fault, int dirty,
-                         int *ptwrite, gfn_t gfn)
+                         int *ptwrite, gfn_t gfn, struct page *page)
 {
         u64 spte;
         int was_rmapped = is_rmap_pte(*shadow_pte);
-        struct page *page;
 
         pgprintk("%s: spte %llx access %x write_fault %d"
                  " user_fault %d gfn %lx\n",
@@ -912,8 +911,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
         if (!(pte_access & ACC_EXEC_MASK))
                 spte |= PT64_NX_MASK;
 
-        page = gfn_to_page(vcpu->kvm, gfn);
-
         spte |= PT_PRESENT_MASK;
         if (pte_access & ACC_USER_MASK)
                 spte |= PT_USER_MASK;
@@ -979,6 +976,11 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         int level = PT32E_ROOT_LEVEL;
         hpa_t table_addr = vcpu->arch.mmu.root_hpa;
         int pt_write = 0;
+        struct page *page;
+
+        down_read(&current->mm->mmap_sem);
+        page = gfn_to_page(vcpu->kvm, gfn);
+        up_read(&current->mm->mmap_sem);
 
         for (; ; level--) {
                 u32 index = PT64_INDEX(v, level);
@@ -989,7 +991,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
                 if (level == 1) {
                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-                                     0, write, 1, &pt_write, gfn);
+                                     0, write, 1, &pt_write, gfn, page);
                         return pt_write || is_io_pte(table[index]);
                 }
 
@@ -1005,6 +1007,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
                                                      NULL);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
+                                kvm_release_page_clean(page);
                                 return -ENOMEM;
                         }
 
@@ -1347,6 +1350,43 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
         return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
+static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                                          const u8 *new, int bytes)
+{
+        gfn_t gfn;
+        int r;
+        u64 gpte = 0;
+
+        if (bytes != 4 && bytes != 8)
+                return;
+
+        /*
+         * Assume that the pte write on a page table of the same type
+         * as the current vcpu paging mode.  This is nearly always true
+         * (might be false while changing modes).  Note it is verified later
+         * by update_pte().
+         */
+        if (is_pae(vcpu)) {
+                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+                if ((bytes == 4) && (gpa % 4 == 0)) {
+                        r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
+                        if (r)
+                                return;
+                        memcpy((void *)&gpte + (gpa % 8), new, 4);
+                } else if ((bytes == 8) && (gpa % 8 == 0)) {
+                        memcpy((void *)&gpte, new, 8);
+                }
+        } else {
+                if ((bytes == 4) && (gpa % 4 == 0))
+                        memcpy((void *)&gpte, new, 4);
+        }
+        if (!is_present_pte(gpte))
+                return;
+        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+        vcpu->arch.update_pte.gfn = gfn;
+        vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const u8 *new, int bytes)
 {
@@ -1367,6 +1407,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         int npte;
 
         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
         mutex_lock(&vcpu->kvm->lock);
         ++vcpu->kvm->stat.mmu_pte_write;
         kvm_mmu_audit(vcpu, "pre pte write");
@@ -1437,6 +1478,10 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         }
         kvm_mmu_audit(vcpu, "post pte write");
         mutex_unlock(&vcpu->kvm->lock);
+        if (vcpu->arch.update_pte.page) {
+                kvm_release_page_clean(vcpu->arch.update_pte.page);
+                vcpu->arch.update_pte.page = NULL;
+        }
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 136a65d72b0a..3d7846ba26e1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -245,6 +245,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 {
         pt_element_t gpte;
         unsigned pte_access;
+        struct page *npage;
 
         gpte = *(const pt_element_t *)pte;
         if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
@@ -256,8 +257,14 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                 return;
         pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
         pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
+        if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
+                return;
+        npage = vcpu->arch.update_pte.page;
+        if (!npage)
+                return;
+        get_page(npage);
         mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-                     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
+                     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
 }
 
 /*
@@ -265,7 +272,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                          struct guest_walker *walker,
-                         int user_fault, int write_fault, int *ptwrite)
+                         int user_fault, int write_fault, int *ptwrite,
+                         struct page *page)
 {
         hpa_t shadow_addr;
         int level;
@@ -321,8 +329,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         r = kvm_read_guest_atomic(vcpu->kvm,
                                                   walker->pte_gpa[level - 2],
                                                   &curr_pte, sizeof(curr_pte));
-                        if (r || curr_pte != walker->ptes[level - 2])
+                        if (r || curr_pte != walker->ptes[level - 2]) {
+                                kvm_release_page_clean(page);
                                 return NULL;
+                        }
                 }
                 shadow_addr = __pa(shadow_page->spt);
                 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
@@ -333,7 +343,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
                      user_fault, write_fault,
                      walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-                     ptwrite, walker->gfn);
+                     ptwrite, walker->gfn, page);
 
         return shadow_ent;
 }
@@ -362,6 +372,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         u64 *shadow_pte;
         int write_pt = 0;
         int r;
+        struct page *page;
 
         pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
         kvm_mmu_audit(vcpu, "pre page fault");
@@ -388,9 +399,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                 return 0;
         }
 
+        page = gfn_to_page(vcpu->kvm, walker.gfn);
+
         mutex_lock(&vcpu->kvm->lock);
         shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-                                  &write_pt);
+                                  &write_pt, page);
         pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
                  shadow_pte, *shadow_pte, write_pt);
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 44b89259f6c4..20597bc16744 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -224,6 +224,11 @@ struct kvm_vcpu_arch {
         int last_pt_write_count;
         u64 *last_pte_updated;
 
+        struct {
+                gfn_t gfn;          /* presumed gfn during guest pte update */
+                struct page *page;  /* page corresponding to that gfn */
+        } update_pte;
+
         struct i387_fxsave_struct host_fx_image;
         struct i387_fxsave_struct guest_fx_image;
 
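As a rough end-to-end illustration of the new vcpu->arch.update_pte cache added above (guess the written gfn and pin its page before the lock, verify the guess inside the lock, release the page after the lock is dropped), here is a minimal user-space sketch. Every name in it (pin_page, guess, use_if_match, finish, ...) is a hypothetical stand-in that only models the lifecycle; it is not the KVM implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page { unsigned long gfn; };

/* Mirrors the new update_pte member added to struct kvm_vcpu_arch. */
struct update_pte_cache {
        unsigned long gfn;      /* presumed gfn during guest pte update */
        struct page *page;      /* pinned page corresponding to that gfn */
};

static struct page backing[8];

/* May sleep: models gfn_to_page(). */
static struct page *pin_page(unsigned long gfn)
{
        backing[gfn % 8].gfn = gfn;
        return &backing[gfn % 8];
}

/* Models kvm_release_page_clean(). */
static void unpin_page(struct page *p)
{
        (void)p;
}

/* Guess phase, before the lock: models mmu_guess_page_from_pte_write(). */
static void guess(struct update_pte_cache *c, unsigned long written_gfn)
{
        c->gfn = written_gfn;
        c->page = pin_page(written_gfn);
}

/* Verify phase, inside the lock: only use the pinned page if the guess
 * matches the pte actually being updated (models FNAME(update_pte)). */
static bool use_if_match(struct update_pte_cache *c, unsigned long gfn)
{
        if (gfn != c->gfn || !c->page)
                return false;           /* wrong guess: skip the update */
        printf("updating spte for gfn %lu\n", gfn);
        return true;
}

/* Release phase, after the lock is dropped: end of kvm_mmu_pte_write(). */
static void finish(struct update_pte_cache *c)
{
        if (c->page) {
                unpin_page(c->page);
                c->page = NULL;
        }
}

int main(void)
{
        struct update_pte_cache c = { 0, NULL };

        guess(&c, 5);
        use_if_match(&c, 5);    /* guess was right: update proceeds */
        use_if_match(&c, 6);    /* guess was wrong: update is skipped */
        finish(&c);
        return 0;
}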