author	Marcelo Tosatti <mtosatti@redhat.com>	2007-12-20 19:18:22 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:20 -0500
commit	10589a4699bb978c781ce73bbae8ca942c5250c9
tree	5585ed87fff0a2ba259fcc6f998022481da75f68	/arch/x86/kvm/mmu.c
parent	774ead3ad9bcbc05ef6aaebb9bdf8b4c3126923b
KVM: MMU: Concurrent guest walkers
Do not hold kvm->lock mutex across the entire pagefault code, only
acquire it in places where it is necessary, such as mmu hash list,
active list, rmap and parent pte handling.

Allow concurrent guest walkers by switching walk_addr() to use
mmap_sem in read-mode.

And get rid of the lockless __gfn_to_page.

[avi: move kvm_mmu_pte_write() locking inside the function]
[avi: add locking for real mode]
[avi: fix cmpxchg locking]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
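The change follows a split-function pattern: the page-fault core becomes a lock-free __nonpaging_map(), a thin nonpaging_map() wrapper takes kvm->lock only around it, and guest page-table walks run under mmap_sem held for read so several vcpus can translate addresses concurrently. Below is a minimal userspace sketch of the same pattern, not the kernel code itself; all names (map_page, walk_addr, shadow_pages) are hypothetical stand-ins for illustration.

/*
 * Userspace analogue of the lock narrowing in this patch.
 * The mutex bounds only the shared-state update, not the whole
 * fault path; the rwlock's read mode admits concurrent walkers.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~ kvm->lock */
static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER; /* ~ mmap_sem */

static int shadow_pages; /* toy stand-in for shared MMU state */

/* Lock-free core, mirrors __nonpaging_map(): caller holds kvm_lock. */
static int __map_page(unsigned long gfn)
{
	(void)gfn; /* real code would build shadow page-table entries */
	return ++shadow_pages;
}

/* Wrapper, mirrors nonpaging_map(): the mutex is taken only here. */
static int map_page(unsigned long gfn)
{
	int r;

	pthread_mutex_lock(&kvm_lock);
	r = __map_page(gfn);
	pthread_mutex_unlock(&kvm_lock);
	return r;
}

/* Guest walker: translation only reads, so read mode suffices and
 * many walkers can proceed in parallel (mirrors walk_addr()). */
static unsigned long walk_addr(unsigned long gva)
{
	unsigned long gfn;

	pthread_rwlock_rdlock(&mmap_sem);
	gfn = gva >> 12; /* toy translation */
	pthread_rwlock_unlock(&mmap_sem);
	return gfn;
}

int main(void)
{
	unsigned long gfn = walk_addr(0x1234000UL);

	printf("gfn=%lu, r=%d\n", gfn, map_page(gfn));
	return 0;
}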
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	41
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f12ec52ad86..3b91227969a5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -974,7 +974,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int level = PT32E_ROOT_LEVEL;
 	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
@@ -1015,6 +1015,17 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	}
 }
 
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+{
+	int r;
+
+	mutex_lock(&vcpu->kvm->lock);
+	r = __nonpaging_map(vcpu, v, write, gfn);
+	mutex_unlock(&vcpu->kvm->lock);
+	return r;
+}
+
+
 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp)
 {
@@ -1031,6 +1042,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
+	mutex_lock(&vcpu->kvm->lock);
 #ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -1038,6 +1050,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		sp = page_header(root);
 		--sp->root_count;
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+		mutex_unlock(&vcpu->kvm->lock);
 		return;
 	}
 #endif
@@ -1051,6 +1064,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
+	mutex_unlock(&vcpu->kvm->lock);
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
@@ -1250,15 +1264,15 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	mutex_lock(&vcpu->kvm->lock);
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		goto out;
+	mutex_lock(&vcpu->kvm->lock);
 	mmu_alloc_roots(vcpu);
+	mutex_unlock(&vcpu->kvm->lock);
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
-	mutex_unlock(&vcpu->kvm->lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_load);
@@ -1353,6 +1367,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int npte;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	mutex_lock(&vcpu->kvm->lock);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
 	if (gfn == vcpu->arch.last_pt_write_gfn
@@ -1421,17 +1436,27 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 	}
 	kvm_mmu_audit(vcpu, "post pte write");
+	mutex_unlock(&vcpu->kvm->lock);
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa;
+	int r;
 
-	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	down_read(&current->mm->mmap_sem);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	up_read(&current->mm->mmap_sem);
+
+	mutex_lock(&vcpu->kvm->lock);
+	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	mutex_unlock(&vcpu->kvm->lock);
+	return r;
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
+	mutex_lock(&vcpu->kvm->lock);
 	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
@@ -1440,6 +1465,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
+	mutex_unlock(&vcpu->kvm->lock);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -1447,7 +1473,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	int r;
 	enum emulation_result er;
 
-	mutex_lock(&vcpu->kvm->lock);
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 	if (r < 0)
 		goto out;
@@ -1462,7 +1487,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 		goto out;
 
 	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
-	mutex_unlock(&vcpu->kvm->lock);
 
 	switch (er) {
 	case EMULATE_DONE:
@@ -1477,7 +1501,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 		BUG();
 	}
 out:
-	mutex_unlock(&vcpu->kvm->lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
@@ -1574,8 +1597,10 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 
+	mutex_lock(&kvm->lock);
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, sp);
+	mutex_unlock(&kvm->lock);
 
 	kvm_flush_remote_tlbs(kvm);
 }