diff options

 arch/x86/kvm/mmu.c | 18 +++++++++++++-----
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ef5d140a2705..064ddfbde108 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1337,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *sp;
+	struct kvm_mmu_page *sp, *unsync_sp = NULL;
 	struct hlist_node *node, *tmp;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1356,20 +1356,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
 		if (sp->gfn == gfn) {
 			if (sp->unsync)
-				if (kvm_sync_page(vcpu, sp))
-					continue;
+				unsync_sp = sp;
 
 			if (sp->role.word != role.word)
 				continue;
 
+			if (!direct && unsync_sp &&
+			      kvm_sync_page_transient(vcpu, unsync_sp)) {
+				unsync_sp = NULL;
+				break;
+			}
+
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
 				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 				kvm_mmu_mark_parents_unsync(sp);
-			}
+			} else if (sp->unsync)
+				kvm_mmu_mark_parents_unsync(sp);
+
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
+	if (!direct && unsync_sp)
+		kvm_sync_page(vcpu, unsync_sp);
+
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
