about summary refs log tree commit diff stats
path: root/arch/x86/kvm/paging_tmpl.h
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2011-07-11 15:29:38 -0400
committerAvi Kivity <avi@redhat.com>2011-07-24 04:50:35 -0400
commitd7c55201e66e9f702db575c9dfc2d34a7af6cf1f (patch)
tree04ed0e93a16fbc7a50ee5a02586ae54cfdf68e39 /arch/x86/kvm/paging_tmpl.h
parentfce92dce79dbf5fff39c7ac2fb149729d79b7a39 (diff)
KVM: MMU: abstract some functions to handle fault pfn
Introduce handle_abnormal_pfn to handle fault pfn on the page fault path, and introduce mmu_invalid_pfn to handle fault pfn on the prefetch path. This is preparatory work for MMIO page fault support. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a4565df501cd..67998d3be084 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -367,7 +367,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
367 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); 367 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
368 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true); 368 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
369 pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte)); 369 pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
370 if (is_error_pfn(pfn)) { 370 if (mmu_invalid_pfn(pfn)) {
371 kvm_release_pfn_clean(pfn); 371 kvm_release_pfn_clean(pfn);
372 return; 372 return;
373 } 373 }
@@ -445,7 +445,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
445 gfn = gpte_to_gfn(gpte); 445 gfn = gpte_to_gfn(gpte);
446 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, 446 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
447 pte_access & ACC_WRITE_MASK); 447 pte_access & ACC_WRITE_MASK);
448 if (is_error_pfn(pfn)) { 448 if (mmu_invalid_pfn(pfn)) {
449 kvm_release_pfn_clean(pfn); 449 kvm_release_pfn_clean(pfn);
450 break; 450 break;
451 } 451 }
@@ -615,10 +615,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
615 &map_writable)) 615 &map_writable))
616 return 0; 616 return 0;
617 617
618 /* mmio */ 618 if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
619 if (is_error_pfn(pfn)) 619 walker.gfn, pfn, walker.pte_access, &r))
620 return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 : 620 return r;
621 addr, walker.pte_access, walker.gfn, pfn); 621
622 spin_lock(&vcpu->kvm->mmu_lock); 622 spin_lock(&vcpu->kvm->mmu_lock);
623 if (mmu_notifier_retry(vcpu, mmu_seq)) 623 if (mmu_notifier_retry(vcpu, mmu_seq))
624 goto out_unlock; 624 goto out_unlock;