author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:29:38 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:35 -0400
commit	d7c55201e66e9f702db575c9dfc2d34a7af6cf1f (patch)
tree	04ed0e93a16fbc7a50ee5a02586ae54cfdf68e39 /arch/x86
parent	fce92dce79dbf5fff39c7ac2fb149729d79b7a39 (diff)
KVM: MMU: abstract some functions to handle fault pfn
Introduce handle_abnormal_pfn to handle a fault pfn on the page fault path,
and mmu_invalid_pfn to handle a fault pfn on the prefetch path.

This is preparatory work for mmio page fault support.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
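For context, the split the patch makes is: an invalid pfn is a hard error handled by kvm_handle_bad_page(), while a pfn with no backing memslot is treated as MMIO (the mmio info is cached and 1 is propagated to the caller); the prefetch paths simply skip any such pfn via mmu_invalid_pfn(). Below is a minimal, self-contained user-space sketch of that dispatch pattern only; pfn_t, the predicates, and the sentinel values are stubs chosen for illustration, not the kernel's definitions.

/*
 * Minimal user-space sketch of the dispatch pattern the patch introduces.
 * pfn_t and the predicates below are illustration-only stubs, not the
 * kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

static bool is_invalid_pfn(pfn_t pfn) { return pfn == (pfn_t)-1; }  /* stub */
static bool is_noslot_pfn(pfn_t pfn)  { return pfn == (pfn_t)-2; }  /* stub */

/* Prefetch path: any abnormal pfn is simply skipped. */
static bool mmu_invalid_pfn(pfn_t pfn)
{
	return is_invalid_pfn(pfn) || is_noslot_pfn(pfn);
}

/*
 * Page-fault path: returns true when the caller should bail out with
 * *ret_val; an invalid pfn is an error, a no-slot pfn is handled as MMIO.
 */
static bool handle_abnormal_pfn(pfn_t pfn, int *ret_val)
{
	if (is_invalid_pfn(pfn)) {
		*ret_val = -14;		/* stands in for -EFAULT */
		return true;
	}
	if (is_noslot_pfn(pfn)) {
		*ret_val = 1;		/* MMIO: caller returns 1, emulation takes over */
		return true;
	}
	return false;			/* normal pfn, keep mapping it */
}

int main(void)
{
	pfn_t pfns[] = { 42, (pfn_t)-1, (pfn_t)-2 };
	int r;

	for (unsigned int i = 0; i < 3; i++) {
		if (handle_abnormal_pfn(pfns[i], &r))
			printf("pfn %lu: abnormal, return %d\n", pfns[i], r);
		else
			printf("pfn %lu: normal (prefetch would skip: %d)\n",
			       pfns[i], mmu_invalid_pfn(pfns[i]));
	}
	return 0;
}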
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	47
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	12
2 files changed, 41 insertions, 18 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 96a7ed4e6837..1d4a2d9cc718 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
-			       unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
-	} else if (is_fault_pfn(pfn))
-		return -EFAULT;
+	}
 
-	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-	return 1;
+	return -EFAULT;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+				pfn_t pfn, unsigned access, int *ret_val)
+{
+	bool ret = true;
+
+	/* The pfn is invalid, report the error! */
+	if (unlikely(is_invalid_pfn(pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+		goto exit;
+	}
+
+	if (unlikely(is_noslot_pfn(pfn))) {
+		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+		*ret_val = 1;
+		goto exit;
+	}
+
+	ret = false;
+exit:
+	return ret;
+}
+
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a4565df501cd..67998d3be084 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -367,7 +367,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (is_error_pfn(pfn)) {
+	if (mmu_invalid_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
 		return;
 	}
@@ -445,7 +445,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 				      pte_access & ACC_WRITE_MASK);
-		if (is_error_pfn(pfn)) {
+		if (mmu_invalid_pfn(pfn)) {
 			kvm_release_pfn_clean(pfn);
 			break;
 		}
@@ -615,10 +615,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 			 &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
-					   addr, walker.pte_access, walker.gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+				walker.gfn, pfn, walker.pte_access, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;