author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:23:20 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:26 -0400
commit		bebb106a5afa32efdf5332ed4a40bf4d6d06b56e (patch)
tree		b8da141e846c9d25fbe4c69b12582cada03726e3 /arch/x86/kvm/paging_tmpl.h
parent		af7cc7d1ee422a612f6785e347a893d44cc892ea (diff)
KVM: MMU: cache mmio info on page fault path
If the page fault is caused by mmio, we can cache the mmio info; later, we do not need to walk the guest page table and can quickly tell that it is an mmio fault while we emulate the mmio instruction.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
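The cache itself is added outside this file (the diffstat below is limited to paging_tmpl.h), so it is not visible in this diff. As a rough, hedged sketch of the idea, using stand-in names and types rather than the patch's actual definitions:

#include <stdint.h>

/* Stand-ins for kernel types so the sketch compiles on its own. */
typedef uint64_t gva_t;
typedef uint64_t gfn_t;
#define PAGE_SHIFT 12
#define PAGE_MASK  (~((UINT64_C(1) << PAGE_SHIFT) - 1))

/* Hypothetical per-vCPU record of the last mmio page fault. */
struct mmio_cache {
	gva_t    mmio_gva;	/* faulting guest virtual address, page aligned */
	gfn_t    mmio_gfn;	/* guest frame number of the mmio region */
	unsigned mmio_access;	/* access bits derived from the guest pte */
};

/* Remember that the fault at @gva/@gfn with @access hit mmio. */
static void cache_mmio_info(struct mmio_cache *c, gva_t gva, gfn_t gfn,
			    unsigned access)
{
	c->mmio_gva    = gva & PAGE_MASK;
	c->mmio_gfn    = gfn;
	c->mmio_access = access;
}

With something like this in place, the emulation path can consult the cached gva/gfn instead of re-walking the guest page tables; the later sketches below reuse these same stand-ins.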
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1e1c2444cef5..f0fb1a4c522d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -208,11 +208,8 @@ retry_walk:
 		goto error;
 	}
 
-	if (unlikely(write_fault && !is_writable_pte(pte)
-		     && (user_fault || is_write_protection(vcpu))))
-		eperm = true;
-
-	if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+	if (!check_write_user_access(vcpu, write_fault, user_fault,
+				     pte))
 		eperm = true;
 
 #if PTTYPE == 64
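This first hunk folds the two open-coded permission checks into check_write_user_access(), which the parent commit (af7cc7d1ee42) introduces outside this file. A hedged, standalone reconstruction of that check, with the vcpu argument replaced by a plain write-protection flag and stand-in PTE bit definitions:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in x86 PTE bits so the sketch compiles on its own. */
#define PT_WRITABLE_MASK (UINT64_C(1) << 1)	/* R/W */
#define PT_USER_MASK     (UINT64_C(1) << 2)	/* U/S */

/*
 * Returns false when the access violates the guest pte: a write faulting
 * on a read-only pte (unless CR0.WP is clear and the access came from
 * supervisor mode), or a user access hitting a supervisor-only pte.
 */
static bool check_write_user_access(bool write_protection, bool write_fault,
				    bool user_fault, uint64_t pte)
{
	if (write_fault && !(pte & PT_WRITABLE_MASK)
	    && (user_fault || write_protection))
		return false;

	if (user_fault && !(pte & PT_USER_MASK))
		return false;

	return true;
}

In the walker above, a false return simply sets eperm, exactly as the removed inline checks did.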
@@ -625,8 +622,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 
 	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
+	if (is_error_pfn(pfn)) {
+		unsigned access = walker.pte_access;
+		bool dirty = is_dirty_gpte(walker.ptes[walker.level - 1]);
+
+		if (!dirty)
+			access &= ~ACC_WRITE_MASK;
+
+		return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
+					   addr, access, walker.gfn, pfn);
+	}
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
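This second hunk is where the caching happens: the mmio path now hands the faulting address and the pte-derived access bits (write permission dropped when the gpte is not dirty) to kvm_handle_bad_page(), which presumably records them for the emulator. A hedged sketch of the consuming side, reusing the struct mmio_cache stand-ins from the sketch after the commit message:

/* Builds on the mmio_cache stand-ins sketched above. */
static bool fast_mmio_gva_to_gpa(const struct mmio_cache *c, gva_t gva,
				 uint64_t *gpa)
{
	/* Only valid when the emulated access hits the cached page. */
	if (!c->mmio_gva || c->mmio_gva != (gva & PAGE_MASK))
		return false;

	/* Rebuild the gpa from the cached gfn plus the page offset. */
	*gpa = (c->mmio_gfn << PAGE_SHIFT) | (gva & ~PAGE_MASK);
	return true;
}

This is the "do not need to walk the guest page table" part of the commit message: on a hit, translation is a mask and a shift; on a miss, the emulator falls back to the normal guest walk.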
@@ -666,6 +671,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	u64 *sptep;
 	int need_flush = 0;
 
+	vcpu_clear_mmio_info(vcpu, gva);
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 
 	for_each_shadow_entry(vcpu, gva, iterator) {
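The last hunk invalidates the cache when the guest executes invlpg, since the mapping the cached translation was derived from may no longer be valid. A minimal sketch of what the clearing helper might do, again using the stand-ins from the first sketch (the real vcpu_clear_mmio_info() lives outside this file):

/* Assumed marker meaning "clear regardless of which gva is cached". */
#define MMIO_GVA_ANY	(~(gva_t)0)

static void clear_mmio_info(struct mmio_cache *c, gva_t gva)
{
	/* Drop the cache only if it covers the invalidated page,
	 * or if the caller asked to clear it unconditionally. */
	if (gva != MMIO_GVA_ANY && c->mmio_gva != (gva & PAGE_MASK))
		return;

	c->mmio_gva = 0;
}

Clearing just mmio_gva is enough to make later lookups miss, since a zero gva is treated as "nothing cached".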