path: root/arch/x86/kvm/mmu.c
author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-05-15 06:52:34 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:35:50 -0400
commit	e02aa901b1aa41fb541521800cc2a4774c162485 (patch)
tree	c2b19ce292c996b25166a9a123ae848f7dfcd8e4 /arch/x86/kvm/mmu.c
parent	1d9dc7e000915b9607b480e34fcb4238b789fbb1 (diff)
KVM: MMU: don't write-protect if there is a new mapping to an unsync page
Two cases can occur in kvm_mmu_get_page():

- The wanted sp is already in the cache. If that sp is unsync, we only need to update it to keep the mapping valid; we do not mark it sync and do not write-protect sp->gfn, since this does not break the unsync rule (one shadow page per gfn).

- The wanted sp does not exist yet, so we must create a new sp for gfn. The gfn may already have another shadow page, so to preserve the unsync rule we must sync (mark sync and write-protect) gfn's unsync shadow page. After multiple unsync shadow pages are enabled, we sync those shadow pages only when the new sp is not allowed to become unsync (the unsync rule then becomes: any pte page may become unsync).

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
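For orientation before reading the diff below, here is a condensed sketch of the lookup flow in kvm_mmu_get_page() after this patch. It is an illustration distilled from the hunks that follow, not the literal kernel code and not compilable on its own: the bucket-walk macro for_each_sp_in_bucket() is a hypothetical stand-in for the hash_link walk, while kvm_sync_page_transient(), kvm_sync_page(), mmu_page_add_parent_pte() and kvm_mmu_mark_parents_unsync() are the helpers that actually appear in the diff.

/*
 * Illustrative sketch only -- condensed from the diff below; not the
 * literal kvm_mmu_get_page() body.
 */
static struct kvm_mmu_page *get_page_sketch(struct kvm_vcpu *vcpu, gfn_t gfn,
					    union kvm_mmu_page_role role,
					    bool direct, u64 *parent_pte)
{
	struct kvm_mmu_page *sp, *unsync_sp = NULL;

	for_each_sp_in_bucket(sp, gfn) {	/* hypothetical stand-in for the hash_link walk */
		if (sp->unsync)
			unsync_sp = sp;		/* remember it, but do not sync it yet */

		if (sp->role.word != role.word)
			continue;

		/*
		 * Case 1: the wanted sp is already cached.  The gfn's unsync
		 * page only needs a transient sync (update it, without marking
		 * it sync or write-protecting the gfn) to keep the mapping valid.
		 */
		if (!direct && unsync_sp &&
		    kvm_sync_page_transient(vcpu, unsync_sp)) {
			unsync_sp = NULL;	/* transient sync zapped the page; allocate below */
			break;
		}

		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
		if (sp->unsync_children) {
			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
			kvm_mmu_mark_parents_unsync(sp);
		} else if (sp->unsync)
			kvm_mmu_mark_parents_unsync(sp);
		return sp;
	}

	/*
	 * Case 2: a new sp must be created for gfn.  To preserve the unsync
	 * rule, any unsync shadow page for the same gfn is fully synced now
	 * (marked sync and write-protected).
	 */
	if (!direct && unsync_sp)
		kvm_sync_page(vcpu, unsync_sp);

	return kvm_mmu_alloc_page(vcpu, parent_pte);
}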
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ef5d140a2705..064ddfbde108 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1337,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *sp;
+	struct kvm_mmu_page *sp, *unsync_sp = NULL;
 	struct hlist_node *node, *tmp;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1356,20 +1356,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
 		if (sp->gfn == gfn) {
 			if (sp->unsync)
-				if (kvm_sync_page(vcpu, sp))
-					continue;
+				unsync_sp = sp;
 
 			if (sp->role.word != role.word)
 				continue;
 
+			if (!direct && unsync_sp &&
+			      kvm_sync_page_transient(vcpu, unsync_sp)) {
+				unsync_sp = NULL;
+				break;
+			}
+
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
 				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 				kvm_mmu_mark_parents_unsync(sp);
-			}
+			} else if (sp->unsync)
+				kvm_mmu_mark_parents_unsync(sp);
+
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
+	if (!direct && unsync_sp)
+		kvm_sync_page(vcpu, unsync_sp);
+
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)