author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2011-07-11 15:23:20 -0400
committer  Avi Kivity <avi@redhat.com>                     2011-07-24 04:50:26 -0400
commit     bebb106a5afa32efdf5332ed4a40bf4d6d06b56e
tree       b8da141e846c9d25fbe4c69b12582cada03726e3 /arch/x86
parent     af7cc7d1ee422a612f6785e347a893d44cc892ea
KVM: MMU: cache mmio info on page fault path
If the page fault is caused by mmio, cache the mmio info; later, while emulating the mmio instruction, we do not need to walk the guest page table again to know that it is an mmio fault.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
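As a rough, self-contained sketch of the idea (simplified types and names invented for illustration only; the real helpers this commit adds live in arch/x86/kvm/x86.h and appear in the diff below): the fault path records the faulting gva/gfn pair once, and the emulation path checks that cache before falling back to a full guest page-table walk.

    /* Standalone illustration of the mmio-cache idea; hypothetical names. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))

    struct mmio_cache {
            uint64_t mmio_gva;      /* page-aligned guest virtual address, 0 = empty */
            uint64_t mmio_gfn;      /* guest frame number backing that gva */
    };

    /* Fault path: remember that this gva/gfn turned out to be mmio. */
    static void cache_mmio_info(struct mmio_cache *c, uint64_t gva, uint64_t gfn)
    {
            c->mmio_gva = gva & PAGE_MASK;
            c->mmio_gfn = gfn;
    }

    /*
     * Emulation path: on a cache hit the gpa is derived directly from the
     * cached gfn plus the page offset, with no guest page-table walk.
     */
    static bool mmio_gva_to_gpa_cached(struct mmio_cache *c, uint64_t gva,
                                       uint64_t *gpa)
    {
            if (!c->mmio_gva || c->mmio_gva != (gva & PAGE_MASK))
                    return false;

            *gpa = (c->mmio_gfn << PAGE_SHIFT) | (gva & ~PAGE_MASK);
            return true;
    }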
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/kvm_host.h |  5
-rw-r--r--   arch/x86/kvm/mmu.c              | 21
-rw-r--r--   arch/x86/kvm/mmu.h              | 23
-rw-r--r--   arch/x86/kvm/paging_tmpl.h      | 21
-rw-r--r--   arch/x86/kvm/x86.c              | 11
-rw-r--r--   arch/x86/kvm/x86.h              | 36
6 files changed, 96 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 59086a77ff13..8da1400ab581 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -424,6 +424,11 @@ struct kvm_vcpu_arch {
 	u64 mcg_ctl;
 	u64 *mce_banks;
 
+	/* Cache MMIO info */
+	u64 mmio_gva;
+	unsigned access;
+	gfn_t mmio_gfn;
+
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 02c839f40e29..d1986b7dcec7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -217,11 +217,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 static int is_cpuid_PSE36(void)
 {
 	return 1;
@@ -243,11 +238,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writable_pte(unsigned long pte)
-{
-	return pte & PT_WRITABLE_MASK;
-}
-
 static int is_dirty_gpte(unsigned long pte)
 {
 	return pte & PT_DIRTY_MASK;
@@ -2247,15 +2237,17 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
+			       unsigned access, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
-		kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
 	} else if (is_fault_pfn(pfn))
 		return -EFAULT;
 
+	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
 	return 1;
 }
 
@@ -2337,7 +2329,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2564,6 +2556,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
+	vcpu_clear_mmio_info(vcpu, ~0ul);
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2710,7 +2703,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 7086ca85d3e7..05310b105dac 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,4 +76,27 @@ static inline int is_present_gpte(unsigned long pte)
 	return pte & PT_PRESENT_MASK;
 }
 
+static inline int is_writable_pte(unsigned long pte)
+{
+	return pte & PT_WRITABLE_MASK;
+}
+
+static inline bool is_write_protection(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
+}
+
+static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
+					   bool write_fault, bool user_fault,
+					   unsigned long pte)
+{
+	if (unlikely(write_fault && !is_writable_pte(pte)
+		     && (user_fault || is_write_protection(vcpu))))
+		return false;
+
+	if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+		return false;
+
+	return true;
+}
 #endif
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1e1c2444cef5..f0fb1a4c522d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -208,11 +208,8 @@ retry_walk:
 			goto error;
 		}
 
-		if (unlikely(write_fault && !is_writable_pte(pte)
-			     && (user_fault || is_write_protection(vcpu))))
-			eperm = true;
-
-		if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+		if (!check_write_user_access(vcpu, write_fault, user_fault,
+					     pte))
 			eperm = true;
 
 #if PTTYPE == 64
@@ -625,8 +622,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 
 	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
+	if (is_error_pfn(pfn)) {
+		unsigned access = walker.pte_access;
+		bool dirty = is_dirty_gpte(walker.ptes[walker.level - 1]);
+
+		if (!dirty)
+			access &= ~ACC_WRITE_MASK;
+
+		return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
+					   addr, access, walker.gfn, pfn);
+	}
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -666,6 +671,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	u64 *sptep;
 	int need_flush = 0;
 
+	vcpu_clear_mmio_info(vcpu, gva);
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 
 	for_each_shadow_entry(vcpu, gva, iterator) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1dbd0443545..028a0f25e8a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4016,6 +4016,14 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
+	if (vcpu_match_mmio_gva(vcpu, gva) &&
+	    check_write_user_access(vcpu, write, access,
+				    vcpu->arch.access)) {
+		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
+		       (gva & (PAGE_SIZE - 1));
+		return 1;
+	}
+
 	if (write)
 		access |= PFERR_WRITE_MASK;
 
@@ -4028,6 +4036,9 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		return 1;
 
+	if (vcpu_match_mmio_gpa(vcpu, *gpa))
+		return 1;
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 256da82856bd..d36fe237c665 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -75,6 +75,42 @@ static inline u32 bit(int bitno)
 	return 1 << (bitno & 31);
 }
 
+static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+					gva_t gva, gfn_t gfn, unsigned access)
+{
+	vcpu->arch.mmio_gva = gva & PAGE_MASK;
+	vcpu->arch.access = access;
+	vcpu->arch.mmio_gfn = gfn;
+}
+
+/*
+ * Clear the mmio cache info for the given gva,
+ * specially, if gva is ~0ul, we clear all mmio cache info.
+ */
+static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+		return;
+
+	vcpu->arch.mmio_gva = 0;
+}
+
+static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+{
+	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+		return true;
+
+	return false;
+}
+
+static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+		return true;
+
+	return false;
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);