author     Xiao Guangrong <guangrong.xiao@linux.intel.com>   2015-08-05 00:04:26 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-08-05 06:47:26 -0400
commit     47ab8751695f71d85562ff497e21b521c789247c
tree       b7cce4fcaf8ce65690475d25e78817623625ee4b
parent     d625b155d21bdedf7d289d422d73c644e2205624
KVM: MMU: fully check zero bits for sptes
The #PF with PFEC.RSV = 1 is designed to speed up MMIO emulation; however,
it is possible that the RSV #PF is caused by a real BUG due to mis-configured
shadow page table entries.

This patch enables a full check of the zero bits in shadow page table entries
(which include not only the bits reserved by the hardware, but also the bits
that will never be set in an SPTE) and dumps the shadow page table hierarchy
when such a bit is found to be set.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
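For readers less familiar with the MMU code, the sketch below illustrates the underlying idea only: walk the SPTEs from root to leaf, test each one against a per-level "must be zero" mask, and dump the recorded hierarchy if any such bit is set. It is not the KVM implementation; the mask values, helper names, and example SPTEs are hypothetical stand-ins for what KVM derives from the CPU's reserved-bit layout and from the bits the MMU never sets itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_ROOT_LEVEL 4	/* hypothetical 4-level shadow paging */

/*
 * Hypothetical per-level "must be zero" masks (bits 48-51 here). In KVM
 * these are derived from the hardware reserved bits plus the bits the MMU
 * itself never sets in an SPTE; the values are illustrative only.
 */
static const uint64_t spte_zero_bits[SHADOW_ROOT_LEVEL] = {
	0x000f000000000000ull,	/* level 1 (leaf) */
	0x000f000000000000ull,	/* level 2 */
	0x000f000000000000ull,	/* level 3 */
	0x000f000000000000ull,	/* level 4 (root) */
};

/* Return true if any bit that must be zero is set in this SPTE. */
static bool spte_has_zero_bits_set(uint64_t spte, int level)
{
	return (spte & spte_zero_bits[level - 1]) != 0;
}

int main(void)
{
	/* A walk records one SPTE per level; these values are made up. */
	uint64_t sptes[SHADOW_ROOT_LEVEL] = {
		0x0000000012345007ull,	/* level 1 */
		0x0000000023456007ull,	/* level 2 */
		0x0008000034567007ull,	/* level 3: bit 51 set, caught by the mask */
		0x0000000045678007ull,	/* level 4 */
	};
	bool reserved = false;
	int level;

	/* Accumulate the check over every level of the walk. */
	for (level = SHADOW_ROOT_LEVEL; level >= 1; level--)
		reserved |= spte_has_zero_bits_set(sptes[level - 1], level);

	/* On a hit, dump the whole hierarchy from root down to leaf. */
	if (reserved)
		for (level = SHADOW_ROOT_LEVEL; level >= 1; level--)
			printf("spte 0x%016llx level %d\n",
			       (unsigned long long)sptes[level - 1], level);

	return 0;
}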
 arch/x86/kvm/mmu.c | 43 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 823e3bbbbfdd..dfa3cee2aa10 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3295,31 +3295,62 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	return vcpu_match_mmio_gva(vcpu, addr);
 }
 
-static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+/* return true if reserved bit is detected on spte. */
+static bool
+walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 spte = 0ull;
+	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
+	int root, leaf;
+	bool reserved = false;
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return spte;
+		goto exit;
 
 	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+
+	for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+	     shadow_walk_okay(&iterator);
+	     __shadow_walk_next(&iterator, spte)) {
+		leaf = iterator.level;
+		spte = mmu_spte_get_lockless(iterator.sptep);
+
+		sptes[leaf - 1] = spte;
+
 		if (!is_shadow_present_pte(spte))
 			break;
+
+		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+						    leaf);
+	}
+
 	walk_shadow_page_lockless_end(vcpu);
 
-	return spte;
+	if (reserved) {
+		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
+		       __func__, addr);
+		while (root >= leaf) {
+			pr_err("------ spte 0x%llx level %d.\n",
+			       sptes[root - 1], root);
+			root--;
+		}
+	}
+exit:
+	*sptep = spte;
+	return reserved;
 }
 
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
+	bool reserved;
 
 	if (quickly_check_mmio_pf(vcpu, addr, direct))
 		return RET_MMIO_PF_EMULATE;
 
-	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
+	if (unlikely(reserved))
+		return RET_MMIO_PF_BUG;
 
 	if (is_mmio_spte(spte)) {
 		gfn_t gfn = get_mmio_spte_gfn(spte);