author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:29:38 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:35 -0400
commit	d7c55201e66e9f702db575c9dfc2d34a7af6cf1f (patch)
tree	04ed0e93a16fbc7a50ee5a02586ae54cfdf68e39 /arch/x86/kvm/mmu.c
parent	fce92dce79dbf5fff39c7ac2fb149729d79b7a39 (diff)
KVM: MMU: abstract some functions to handle fault pfn
Introduce handle_abnormal_pfn to handle fault pfn on the page fault path,
and introduce mmu_invalid_pfn to handle fault pfn on the prefetch path.
This is preparation work for mmio page fault support.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 47 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 12 deletions(-)
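For orientation: handle_abnormal_pfn separates two classes of bad pfn that
earlier patches in this series split apart. A rough sketch of the predicates
it relies on, paraphrased here for this note (the real definitions live
outside this file and are not part of this diff):

	/*
	 * Sketch only, not a hunk from this patch: an "invalid" pfn is a
	 * real error (a hwpoisoned page or a failed host-side fault),
	 * while a "noslot" pfn merely means no memslot backs the gfn --
	 * the case the upcoming mmio page fault support wants to
	 * recognize and cache.
	 */
	int is_invalid_pfn(pfn_t pfn)
	{
		return pfn == hwpoison_pfn || pfn == fault_pfn;
	}

	int is_noslot_pfn(pfn_t pfn)
	{
		return pfn == bad_pfn;
	}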
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 96a7ed4e683..1d4a2d9cc71 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
-			       unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
-	} else if (is_fault_pfn(pfn))
-		return -EFAULT;
+	}
 
-	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-	return 1;
+	return -EFAULT;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+				pfn_t pfn, unsigned access, int *ret_val)
+{
+	bool ret = true;
+
+	/* The pfn is invalid, report the error! */
+	if (unlikely(is_invalid_pfn(pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+		goto exit;
+	}
+
+	if (unlikely(is_noslot_pfn(pfn))) {
+		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+		*ret_val = 1;
+		goto exit;
+	}
+
+	ret = false;
+exit:
+	return ret;
+}
+
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
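Note that mmu_invalid_pfn gains no caller in the hunks above: this view is
limited to arch/x86/kvm/mmu.c, and the prefetch-path users live elsewhere
(paging_tmpl.h). The pattern there follows the shape below, a reconstruction
under that assumption rather than a hunk from this view: on the prefetch
path there is no fault to complete, so an abnormal pfn simply aborts the
prefetch instead of being reported to the guest.

	/*
	 * Sketch of a prefetch-path caller (illustrative; the real hunks
	 * are outside this filtered diff). resolve_gfn_to_pfn() is a
	 * placeholder for whatever atomic gfn->pfn lookup the prefetch
	 * code uses.
	 */
	pfn = resolve_gfn_to_pfn(vcpu, gfn);
	if (mmu_invalid_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		break;		/* give up on prefetching this entry */
	}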