about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
author: Marcelo Tosatti <mtosatti@redhat.com> 2008-04-04 13:56:44 -0400
committer: Avi Kivity <avi@qumranet.com> 2008-04-27 05:00:58 -0400
commit: bed1d1dfc4a458d82bcd258082638cbba860190d (patch)
tree: 344f9cbf54082161e54ba77164f55aec695b9403 /arch/x86/kvm
parent: fcd6dbac9267c1c06a205ad8bb4bd027c0ace7f7 (diff)
KVM: MMU: prepopulate guest pages after write-protecting
Zdenek reported a bug where a looping "dmsetup status" eventually hangs on SMP guests.

The problem is that kvm_mmu_get_page() prepopulates the shadow MMU before write-protecting the guest page tables. By doing so, it leaves a window open in which the guest can mark a pte as present while the host has that pte shadow-cached as "notrap". Accesses to such an address will fault in the guest without the host having a chance to fix the situation.

Fix by moving the write protection before the pte prefetch.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c4c16662c68..c89bf230af67 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -852,9 +852,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
-	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
+	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	return sp;
 }
 