diff options
author | Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> | 2010-06-11 09:31:38 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-08-01 03:46:42 -0400 |
commit | be71e061d15c0aad4f8c2606f76c57b8a19792fd (patch) | |
tree | d103cf12b162d2e4f8855b9bad5f92e640987d76 /arch/x86/kvm | |
parent | f918b443527e98476c8cc45683152106b9e4bedc (diff) |
KVM: MMU: don't mark pte notrap if it's just sync transient
If the shadow page is only being synced transiently (sync-sp with clear_unsync == false), don't mark its not-present PTEs as notrap
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/mmu.c | 11 | ||||
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 5 |
2 files changed, 7 insertions, 9 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ff333572be75..d1e09f3c5614 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1103,7 +1103,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, | |||
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, | 1105 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
1106 | struct kvm_mmu_page *sp) | 1106 | struct kvm_mmu_page *sp, bool clear_unsync) |
1107 | { | 1107 | { |
1108 | return 1; | 1108 | return 1; |
1109 | } | 1109 | } |
@@ -1228,7 +1228,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
1228 | if (clear_unsync) | 1228 | if (clear_unsync) |
1229 | kvm_unlink_unsync_page(vcpu->kvm, sp); | 1229 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
1230 | 1230 | ||
1231 | if (vcpu->arch.mmu.sync_page(vcpu, sp)) { | 1231 | if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) { |
1232 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); | 1232 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
1233 | return 1; | 1233 | return 1; |
1234 | } | 1234 | } |
@@ -1237,7 +1237,6 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
1237 | return 0; | 1237 | return 0; |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | static void mmu_convert_notrap(struct kvm_mmu_page *sp); | ||
1241 | static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, | 1240 | static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, |
1242 | struct kvm_mmu_page *sp) | 1241 | struct kvm_mmu_page *sp) |
1243 | { | 1242 | { |
@@ -1245,9 +1244,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, | |||
1245 | int ret; | 1244 | int ret; |
1246 | 1245 | ||
1247 | ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); | 1246 | ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); |
1248 | if (!ret) | 1247 | if (ret) |
1249 | mmu_convert_notrap(sp); | ||
1250 | else | ||
1251 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); | 1248 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
1252 | 1249 | ||
1253 | return ret; | 1250 | return ret; |
@@ -1273,7 +1270,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
1273 | 1270 | ||
1274 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); | 1271 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
1275 | if ((s->role.cr4_pae != !!is_pae(vcpu)) || | 1272 | if ((s->role.cr4_pae != !!is_pae(vcpu)) || |
1276 | (vcpu->arch.mmu.sync_page(vcpu, s))) { | 1273 | (vcpu->arch.mmu.sync_page(vcpu, s, true))) { |
1277 | kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); | 1274 | kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); |
1278 | continue; | 1275 | continue; |
1279 | } | 1276 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index efba353369e7..863920f649fb 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -578,7 +578,8 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | |||
578 | * can't change unless all sptes pointing to it are nuked first. | 578 | * can't change unless all sptes pointing to it are nuked first. |
579 | * - Alias changes zap the entire shadow cache. | 579 | * - Alias changes zap the entire shadow cache. |
580 | */ | 580 | */ |
581 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 581 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
582 | bool clear_unsync) | ||
582 | { | 583 | { |
583 | int i, offset, nr_present; | 584 | int i, offset, nr_present; |
584 | bool reset_host_protection; | 585 | bool reset_host_protection; |
@@ -615,7 +616,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
615 | u64 nonpresent; | 616 | u64 nonpresent; |
616 | 617 | ||
617 | rmap_remove(vcpu->kvm, &sp->spt[i]); | 618 | rmap_remove(vcpu->kvm, &sp->spt[i]); |
618 | if (is_present_gpte(gpte)) | 619 | if (is_present_gpte(gpte) || !clear_unsync) |
619 | nonpresent = shadow_trap_nonpresent_pte; | 620 | nonpresent = shadow_trap_nonpresent_pte; |
620 | else | 621 | else |
621 | nonpresent = shadow_notrap_nonpresent_pte; | 622 | nonpresent = shadow_notrap_nonpresent_pte; |