author    Avi Kivity <avi@qumranet.com>  2007-12-30 05:29:05 -0500
committer Avi Kivity <avi@qumranet.com>  2008-01-30 11:01:21 -0500
commit    d7824fff896a1698a07a8046dc362f4500c302f7 (patch)
tree      249e23ec224bc621bea1ef24fa83f5a749d6b35b /arch/x86/kvm/mmu.c
parent    7ec54588210df29ea637e6054489bc942c0ef371 (diff)
KVM: MMU: Avoid calling gfn_to_page() in mmu_set_spte()
Since gfn_to_page() is a sleeping function, and we want to make the core mmu
spinlocked, we need to pass the page from the walker context (which can sleep)
to the shadow context (which cannot).

[marcelo: avoid recursive locking of mmap_sem]

Signed-off-by: Avi Kivity <avi@qumranet.com>
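In outline, the pattern the patch establishes: resolve the struct page with
gfn_to_page() while sleeping is still legal, then hand it down to the code
that installs the shadow pte. A minimal sketch of that shape, using the
patch's new mmu_set_spte() signature; note the spinlock here is hypothetical,
since at this commit the mmu still runs under the kvm->lock mutex and the
spinlock conversion is only the stated goal of the series:

        struct page *page;
        int pt_write = 0;

        /* Walker context: may sleep, so do the page lookup up front. */
        down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);

        /* Shadow context: must not sleep; only consumes the prefetched page. */
        spin_lock(&mmu_lock);                   /* hypothetical future lock */
        mmu_set_spte(vcpu, shadow_pte, pt_access, pte_access,
                     user_fault, write_fault, dirty, &pt_write, gfn, page);
        spin_unlock(&mmu_lock);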
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  55
1 file changed, 50 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3b91227969a5..c0b757be7b99 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,11 +890,10 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                          unsigned pt_access, unsigned pte_access,
                          int user_fault, int write_fault, int dirty,
-                         int *ptwrite, gfn_t gfn)
+                         int *ptwrite, gfn_t gfn, struct page *page)
 {
         u64 spte;
         int was_rmapped = is_rmap_pte(*shadow_pte);
-        struct page *page;
 
         pgprintk("%s: spte %llx access %x write_fault %d"
                  " user_fault %d gfn %lx\n",
@@ -912,8 +911,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
         if (!(pte_access & ACC_EXEC_MASK))
                 spte |= PT64_NX_MASK;
 
-        page = gfn_to_page(vcpu->kvm, gfn);
-
         spte |= PT_PRESENT_MASK;
         if (pte_access & ACC_USER_MASK)
                 spte |= PT_USER_MASK;
@@ -979,6 +976,11 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         int level = PT32E_ROOT_LEVEL;
         hpa_t table_addr = vcpu->arch.mmu.root_hpa;
         int pt_write = 0;
+        struct page *page;
+
+        down_read(&current->mm->mmap_sem);
+        page = gfn_to_page(vcpu->kvm, gfn);
+        up_read(&current->mm->mmap_sem);
 
         for (; ; level--) {
                 u32 index = PT64_INDEX(v, level);
@@ -989,7 +991,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
                 if (level == 1) {
                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-                                     0, write, 1, &pt_write, gfn);
+                                     0, write, 1, &pt_write, gfn, page);
                         return pt_write || is_io_pte(table[index]);
                 }
 
@@ -1005,6 +1007,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
                                              NULL);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
+                                kvm_release_page_clean(page);
                                 return -ENOMEM;
                         }
 
@@ -1347,6 +1350,43 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
         return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
+static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                                          const u8 *new, int bytes)
+{
+        gfn_t gfn;
+        int r;
+        u64 gpte = 0;
+
+        if (bytes != 4 && bytes != 8)
+                return;
+
+        /*
+         * Assume that the pte write is to a page table of the same type
+         * as the current vcpu paging mode.  This is nearly always true
+         * (might be false while changing modes).  Note it is verified later
+         * by update_pte().
+         */
+        if (is_pae(vcpu)) {
+                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+                if ((bytes == 4) && (gpa % 4 == 0)) {
+                        r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
+                        if (r)
+                                return;
+                        memcpy((void *)&gpte + (gpa % 8), new, 4);
+                } else if ((bytes == 8) && (gpa % 8 == 0)) {
+                        memcpy((void *)&gpte, new, 8);
+                }
+        } else {
+                if ((bytes == 4) && (gpa % 4 == 0))
+                        memcpy((void *)&gpte, new, 4);
+        }
+        if (!is_present_pte(gpte))
+                return;
+        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+        vcpu->arch.update_pte.gfn = gfn;
+        vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const u8 *new, int bytes)
 {
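A worked example of the splice in the PAE branch above: a 32-bit guest writes
a 64-bit gpte in two 4-byte halves, so the function reads the whole 8-byte
entry back from the guest at gpa & ~(u64)7 and overlays the freshly written
half at byte offset gpa % 8 (0 for the low half, 4 for the high half on
little-endian x86). A standalone userspace demonstration of just that merge,
with made-up values:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                uint64_t gpte = 0x00000000a0001067ULL; /* old gpte read back from guest */
                uint32_t new4 = 0x000b1067;            /* the guest's 4-byte write */
                uint64_t gpa  = 0x7e04;                /* gpa % 8 == 4: the high half */

                /* Splice the written half over the stale half, as the patch does. */
                memcpy((unsigned char *)&gpte + (gpa % 8), &new4, 4);

                /* On little-endian x86 this prints 0x000b1067a0001067. */
                printf("merged gpte = %#018llx\n", (unsigned long long)gpte);
                return 0;
        }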
@@ -1367,6 +1407,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         int npte;
 
         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
         mutex_lock(&vcpu->kvm->lock);
         ++vcpu->kvm->stat.mmu_pte_write;
         kvm_mmu_audit(vcpu, "pre pte write");
@@ -1437,6 +1478,10 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         }
         kvm_mmu_audit(vcpu, "post pte write");
         mutex_unlock(&vcpu->kvm->lock);
+        if (vcpu->arch.update_pte.page) {
+                kvm_release_page_clean(vcpu->arch.update_pte.page);
+                vcpu->arch.update_pte.page = NULL;
+        }
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)