author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:23:20 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:26 -0400
commit		bebb106a5afa32efdf5332ed4a40bf4d6d06b56e (patch)
tree		b8da141e846c9d25fbe4c69b12582cada03726e3 /arch/x86/kvm/mmu.c
parent		af7cc7d1ee422a612f6785e347a893d44cc892ea (diff)
KVM: MMU: cache mmio info on page fault path
If the page fault is caused by mmio, we can cache the mmio info; later, we do not need to walk the guest page table and can quickly know it is a mmio fault while we emulate the mmio instruction.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	21
1 file changed, 7 insertions(+), 14 deletions(-)
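The caching itself is done by vcpu_cache_mmio_info() and undone by vcpu_clear_mmio_info(); those helpers live outside mmu.c and so do not appear in this diffstat-limited view. As a rough sketch (the struct kvm_vcpu_arch field names are illustrative, not taken from this diff), caching the mmio info amounts to stashing the faulting gva, gfn and access bits on the vcpu:

/*
 * Sketch only -- not part of the mmu.c diff below.  Stash the details of
 * an mmio fault on the vcpu so the emulation path can recognise the same
 * access later without re-walking the guest page tables.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;	/* illustrative field names */
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}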
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 02c839f40e29..d1986b7dcec7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -217,11 +217,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 static int is_cpuid_PSE36(void)
 {
 	return 1;
@@ -243,11 +238,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writable_pte(unsigned long pte)
-{
-	return pte & PT_WRITABLE_MASK;
-}
-
 static int is_dirty_gpte(unsigned long pte)
 {
 	return pte & PT_DIRTY_MASK;
@@ -2247,15 +2237,17 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
+			       unsigned access, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
-		kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
 	} else if (is_fault_pfn(pfn))
 		return -EFAULT;
 
+	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
 	return 1;
 }
 
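On the emulation side, a consumer of this cache (hypothetical here, not part of this hunk) only has to compare the gva being emulated against the cached one to know the access is mmio, instead of walking the guest page tables again:

/*
 * Hypothetical consumer sketch: if the gva currently being emulated falls
 * in the cached mmio page, treat it as mmio immediately.
 */
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}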
@@ -2337,7 +2329,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2564,6 +2556,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
+	vcpu_clear_mmio_info(vcpu, ~0ul);
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
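Passing ~0ul here asks for an unconditional flush: once the shadow roots are resynced, the guest page tables may have changed and a cached gva-to-gfn mmio mapping can no longer be trusted. A sketch of the clearing helper, again assumed rather than taken from this diff:

/*
 * Sketch only: drop the cached mmio info.  gva == ~0ul clears the cache
 * unconditionally; otherwise only a matching cached gva is dropped.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}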
@@ -2710,7 +2703,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;