about summary refs log tree commit diff stats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
author	Marcelo Tosatti <mtosatti@redhat.com>	2008-02-20 14:47:24 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 04:53:25 -0400
commit	2e53d63acba75795aa226febd140f67c58c6a353 (patch)
tree	be4ad4e5b28c737053af78a950d270a657e9f628 /arch/x86/kvm/mmu.c
parent	847f0ad8cbfa70c1af6948025836dfbd9ed6da1e (diff)
KVM: MMU: ignore zapped root pagetables
Mark zapped root pagetables as invalid and ignore such pages during lookup.

This is a problem with the cr3-target feature, where a zapped root table fools
the faulting code into creating a read-only mapping. The result is a lockup
if the instruction can't be emulated.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f7541fe22cd8..103d008dab8b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -667,7 +667,8 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.metaphysical
+		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __FUNCTION__, sp->role.word);
 			return sp;
@@ -792,8 +793,11 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->root_count) {
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
-	} else
+	} else {
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+		sp->role.invalid = 1;
+		kvm_reload_remote_mmus(kvm);
+	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -1073,6 +1077,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		sp = page_header(root);
 		--sp->root_count;
+		if (!sp->root_count && sp->role.invalid)
+			kvm_mmu_zap_page(vcpu->kvm, sp);
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
@@ -1085,6 +1091,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			--sp->root_count;
+			if (!sp->root_count && sp->role.invalid)
+				kvm_mmu_zap_page(vcpu->kvm, sp);
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}