aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-05-15 06:51:24 -0400
committerAvi Kivity <avi@redhat.com>2010-08-01 03:35:49 -0400
commit1d9dc7e000915b9607b480e34fcb4238b789fbb1 (patch)
treebb90f755e648b97a69b10348e5ed2c65822d9c59 /arch/x86/kvm/mmu.c
parent98918833a3e21ffc5619535955e7a003cb788163 (diff)
KVM: MMU: split kvm_sync_page() function
Split kvm_sync_page() into kvm_sync_page() and kvm_sync_page_transient() to clarify the code and address Avi's suggestion. The kvm_sync_page_transient() function only updates the shadow page; it does not mark it sync and does not write-protect sp->gfn. It will be used by a later patch. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c29
1 file changed, 25 insertions, 4 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c9d6df0113e..ef5d140a2705 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1199,16 +1199,20 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1199 1199
1200static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp); 1200static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1201 1201
1202static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 1202static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1203 bool clear_unsync)
1203{ 1204{
1204 if (sp->role.cr4_pae != !!is_pae(vcpu)) { 1205 if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1205 kvm_mmu_zap_page(vcpu->kvm, sp); 1206 kvm_mmu_zap_page(vcpu->kvm, sp);
1206 return 1; 1207 return 1;
1207 } 1208 }
1208 1209
1209 if (rmap_write_protect(vcpu->kvm, sp->gfn)) 1210 if (clear_unsync) {
1210 kvm_flush_remote_tlbs(vcpu->kvm); 1211 if (rmap_write_protect(vcpu->kvm, sp->gfn))
1211 kvm_unlink_unsync_page(vcpu->kvm, sp); 1212 kvm_flush_remote_tlbs(vcpu->kvm);
1213 kvm_unlink_unsync_page(vcpu->kvm, sp);
1214 }
1215
1212 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { 1216 if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1213 kvm_mmu_zap_page(vcpu->kvm, sp); 1217 kvm_mmu_zap_page(vcpu->kvm, sp);
1214 return 1; 1218 return 1;
@@ -1218,6 +1222,23 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1218 return 0; 1222 return 0;
1219} 1223}
1220 1224
1225static void mmu_convert_notrap(struct kvm_mmu_page *sp);
1226static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1227 struct kvm_mmu_page *sp)
1228{
1229 int ret;
1230
1231 ret = __kvm_sync_page(vcpu, sp, false);
1232 if (!ret)
1233 mmu_convert_notrap(sp);
1234 return ret;
1235}
1236
1237static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1238{
1239 return __kvm_sync_page(vcpu, sp, true);
1240}
1241
1221struct mmu_page_path { 1242struct mmu_page_path {
1222 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; 1243 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1223 unsigned int idx[PT64_ROOT_LEVEL-1]; 1244 unsigned int idx[PT64_ROOT_LEVEL-1];