diff options
author | Marcelo Tosatti <mtosatti@redhat.com> | 2014-01-03 14:09:32 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-01-15 06:16:16 -0500 |
commit | 37f6a4e237303549c8676dfe1fd1991ceab512eb (patch) | |
tree | e24c69c991f4c9624aaaed1bd2843a34adcd8bfd /arch | |
parent | ab53f22e2e2dbb60d4eb1b505776f03da4aa9bdc (diff) |
KVM: x86: handle invalid root_hpa everywhere
Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to the
bug fixed by commit 989c6b34f6a9480e397b.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/mmu.c | 9 | ||||
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 8 |
2 files changed, 17 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 31a570287fcc..e50425d0f5f7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, | |||
2832 | bool ret = false; | 2832 | bool ret = false; |
2833 | u64 spte = 0ull; | 2833 | u64 spte = 0ull; |
2834 | 2834 | ||
2835 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) | ||
2836 | return false; | ||
2837 | |||
2835 | if (!page_fault_can_be_fast(error_code)) | 2838 | if (!page_fault_can_be_fast(error_code)) |
2836 | return false; | 2839 | return false; |
2837 | 2840 | ||
@@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) | |||
3227 | struct kvm_shadow_walk_iterator iterator; | 3230 | struct kvm_shadow_walk_iterator iterator; |
3228 | u64 spte = 0ull; | 3231 | u64 spte = 0ull; |
3229 | 3232 | ||
3233 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) | ||
3234 | return spte; | ||
3235 | |||
3230 | walk_shadow_page_lockless_begin(vcpu); | 3236 | walk_shadow_page_lockless_begin(vcpu); |
3231 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) | 3237 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) |
3232 | if (!is_shadow_present_pte(spte)) | 3238 | if (!is_shadow_present_pte(spte)) |
@@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) | |||
4513 | u64 spte; | 4519 | u64 spte; |
4514 | int nr_sptes = 0; | 4520 | int nr_sptes = 0; |
4515 | 4521 | ||
4522 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) | ||
4523 | return nr_sptes; | ||
4524 | |||
4516 | walk_shadow_page_lockless_begin(vcpu); | 4525 | walk_shadow_page_lockless_begin(vcpu); |
4517 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { | 4526 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { |
4518 | sptes[iterator.level-1] = spte; | 4527 | sptes[iterator.level-1] = spte; |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index ad75d77999d0..cba218a2f08d 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
569 | if (FNAME(gpte_changed)(vcpu, gw, top_level)) | 569 | if (FNAME(gpte_changed)(vcpu, gw, top_level)) |
570 | goto out_gpte_changed; | 570 | goto out_gpte_changed; |
571 | 571 | ||
572 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) | ||
573 | goto out_gpte_changed; | ||
574 | |||
572 | for (shadow_walk_init(&it, vcpu, addr); | 575 | for (shadow_walk_init(&it, vcpu, addr); |
573 | shadow_walk_okay(&it) && it.level > gw->level; | 576 | shadow_walk_okay(&it) && it.level > gw->level; |
574 | shadow_walk_next(&it)) { | 577 | shadow_walk_next(&it)) { |
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | |||
820 | */ | 823 | */ |
821 | mmu_topup_memory_caches(vcpu); | 824 | mmu_topup_memory_caches(vcpu); |
822 | 825 | ||
826 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) { | ||
827 | WARN_ON(1); | ||
828 | return; | ||
829 | } | ||
830 | |||
823 | spin_lock(&vcpu->kvm->mmu_lock); | 831 | spin_lock(&vcpu->kvm->mmu_lock); |
824 | for_each_shadow_entry(vcpu, gva, iterator) { | 832 | for_each_shadow_entry(vcpu, gva, iterator) { |
825 | level = iterator.level; | 833 | level = iterator.level; |