author     Marcelo Tosatti <mtosatti@redhat.com>    2008-02-20 14:47:24 -0500
committer  Avi Kivity <avi@qumranet.com>            2008-04-27 04:53:25 -0400
commit     2e53d63acba75795aa226febd140f67c58c6a353 (patch)
tree       be4ad4e5b28c737053af78a950d270a657e9f628
parent     847f0ad8cbfa70c1af6948025836dfbd9ed6da1e (diff)
KVM: MMU: ignore zapped root pagetables
Mark zapped root pagetables as invalid and ignore such pages during lookup.

This is a problem with the cr3-target feature, where a zapped root table
fools the faulting code into creating a read-only mapping. The result is
a lockup if the instruction can't be emulated.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
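As an illustration of the lifecycle this patch introduces (zap is deferred
while a page is still in use as a root), here is a minimal self-contained
userspace sketch. The toy_page/toy_zap/toy_free_root names are hypothetical
stand-ins, not the kernel's types; the sketch only models the
refcount-plus-invalid-bit logic of kvm_mmu_zap_page(), kvm_mmu_lookup_page()
and mmu_free_roots() in the diff below.

#include <stdio.h>

/* Toy stand-in for kvm_mmu_page and its role bits (hypothetical names). */
struct toy_page {
	unsigned long gfn;
	int root_count;   /* how many vcpus use this page as a root */
	int invalid;      /* set when zapped while still in use as a root */
	int freed;
};

/* Lookup ignores invalid pages, mirroring the kvm_mmu_lookup_page change. */
struct toy_page *toy_lookup(struct toy_page *sp, unsigned long gfn)
{
	if (sp && sp->gfn == gfn && !sp->invalid && !sp->freed)
		return sp;
	return NULL;
}

/* Zap frees immediately only when no root references remain; otherwise it
 * just marks the page invalid (the kvm_mmu_zap_page change). */
void toy_zap(struct toy_page *sp)
{
	if (!sp->root_count)
		sp->freed = 1;
	else
		sp->invalid = 1;
}

/* Dropping the last root reference zaps a deferred-invalid page
 * (the mmu_free_roots change). */
void toy_free_root(struct toy_page *sp)
{
	--sp->root_count;
	if (!sp->root_count && sp->invalid)
		toy_zap(sp);
}

int main(void)
{
	struct toy_page sp = { .gfn = 42, .root_count = 1 };

	toy_zap(&sp);            /* still in use: only marked invalid */
	printf("after zap: invalid=%d freed=%d\n", sp.invalid, sp.freed);
	printf("lookup: %s\n", toy_lookup(&sp, 42) ? "found" : "ignored");

	toy_free_root(&sp);      /* last reference gone: actually freed */
	printf("after root free: freed=%d\n", sp.freed);
	return 0;
}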
 arch/x86/kvm/mmu.c         | 12 ++++++++++--
 arch/x86/kvm/x86.c         | 12 ++++++++++++
 include/asm-x86/kvm_host.h |  1 +
 include/linux/kvm_host.h   |  2 ++
 virt/kvm/kvm_main.c        | 23 +++++++++++++++++++++++
 5 files changed, 48 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f7541fe22cd..103d008dab8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -667,7 +667,8 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.metaphysical
+		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __FUNCTION__, sp->role.word);
 			return sp;
@@ -792,8 +793,11 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->root_count) {
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
-	} else
+	} else {
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+		sp->role.invalid = 1;
+		kvm_reload_remote_mmus(kvm);
+	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -1073,6 +1077,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		sp = page_header(root);
 		--sp->root_count;
+		if (!sp->root_count && sp->role.invalid)
+			kvm_mmu_zap_page(vcpu->kvm, sp);
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
@@ -1085,6 +1091,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			--sp->root_count;
+			if (!sp->root_count && sp->role.invalid)
+				kvm_mmu_zap_page(vcpu->kvm, sp);
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0dd038e7392..e8e64927bdd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2658,6 +2658,10 @@ preempted:
 		kvm_x86_ops->guest_debug_pre(vcpu);
 
 again:
+	if (vcpu->requests)
+		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			kvm_mmu_unload(vcpu);
+
 	r = kvm_mmu_reload(vcpu);
 	if (unlikely(r))
 		goto out;
@@ -2689,6 +2693,14 @@ again:
 		goto out;
 	}
 
+	if (vcpu->requests)
+		if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+			local_irq_enable();
+			preempt_enable();
+			r = 1;
+			goto out;
+		}
+
 	if (signal_pending(current)) {
 		local_irq_enable();
 		preempt_enable();
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 935ffa4db9f..8c3f74b7352 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -141,6 +141,7 @@ union kvm_mmu_page_role {
 		unsigned pad_for_nice_hex_output:6;
 		unsigned metaphysical:1;
 		unsigned access:3;
+		unsigned invalid:1;
 	};
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eb88d32dd5c..994278fb588 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -37,6 +37,7 @@
 #define KVM_REQ_TLB_FLUSH          0
 #define KVM_REQ_MIGRATE_TIMER      1
 #define KVM_REQ_REPORT_TPR_ACCESS  2
+#define KVM_REQ_MMU_RELOAD         3
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
@@ -190,6 +191,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cf6df5167af..c41eb57ce29 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -119,6 +119,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+	int i, cpu;
+	cpumask_t cpus;
+	struct kvm_vcpu *vcpu;
+
+	cpus_clear(cpus);
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		vcpu = kvm->vcpus[i];
+		if (!vcpu)
+			continue;
+		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			continue;
+		cpu = vcpu->cpu;
+		if (cpu != -1 && cpu != raw_smp_processor_id())
+			cpu_set(cpu, cpus);
+	}
+	if (cpus_empty(cpus))
+		return;
+	smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
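For readers tracing the control flow: the reload path is a per-vcpu
request-bit handshake between kvm_reload_remote_mmus() above and the
vcpu_run() changes in x86.c. The sender sets the bit at most once per vcpu;
each vcpu consumes it and drops its MMU before reloading. Below is a minimal
single-threaded userspace sketch of that protocol; the toy_ names and the
simulation are hypothetical simplifications, not the kernel API.

#include <stdio.h>

#define REQ_MMU_RELOAD (1UL << 3)   /* plays the role of KVM_REQ_MMU_RELOAD */

struct toy_vcpu {
	unsigned long requests;
	int mmu_loaded;
};

/* kvm_reload_remote_mmus() analogue: flag every vcpu once. */
void reload_all(struct toy_vcpu *vcpus, int n)
{
	for (int i = 0; i < n; i++)
		vcpus[i].requests |= REQ_MMU_RELOAD;
}

/* vcpu_run() analogue: consume the request, unload, then reload the MMU. */
void run_once(struct toy_vcpu *v)
{
	if (v->requests & REQ_MMU_RELOAD) {
		v->requests &= ~REQ_MMU_RELOAD;
		v->mmu_loaded = 0;          /* kvm_mmu_unload() */
	}
	if (!v->mmu_loaded)
		v->mmu_loaded = 1;          /* kvm_mmu_reload() */
}

int main(void)
{
	struct toy_vcpu vcpus[2] = { { 0, 1 }, { 0, 1 } };

	reload_all(vcpus, 2);
	for (int i = 0; i < 2; i++) {
		run_once(&vcpus[i]);
		printf("vcpu%d reloaded, requests=%lx\n", i, vcpus[i].requests);
	}
	return 0;
}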