author:    Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2014-04-17 05:06:13 -0400
committer: Marcelo Tosatti <mtosatti@redhat.com>              2014-04-23 16:49:49 -0400
commit:    92a476cbfc476c63ee982dd33d15a8c88b4d51b9
tree:      799bba57656b27699674a51381d489ffa62dcb2b /arch/x86/kvm
parent:    a086f6a1ebc9d8d2d028b99e779ce0dbd9691dea

KVM: MMU: properly check last spte in fast_page_fault()
Use sp->role.level instead of @level, since @level is not derived from the
page table hierarchy.
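
For context, a minimal userspace sketch of the mechanism this relies on:
page_header(__pa(sptep)) maps an spte pointer back to the kvm_mmu_page that
contains it, whose role.level was recorded when the shadow page was
allocated. The types, the single-descriptor lookup, and page_header_sketch()
below are simplified stand-ins, not the real KVM definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    /* Simplified stand-in for KVM's shadow page descriptor. */
    struct kvm_mmu_page {
            uint64_t *table;                /* the page of sptes         */
            struct { int level; } role;     /* level fixed at allocation */
    };

    /*
     * Sketch of page_header(__pa(sptep)): an spte lives inside a
     * page-aligned table, so masking its address down to a page boundary
     * identifies the table; KVM reaches the descriptor via the table's
     * struct page, which a single descriptor stands in for here.
     */
    static struct kvm_mmu_page *page_header_sketch(struct kvm_mmu_page *sp,
                                                   uint64_t *sptep)
    {
            uintptr_t base = (uintptr_t)sptep & ~(PAGE_SIZE - 1);
            return base == (uintptr_t)sp->table ? sp : NULL;
    }

    int main(void)
    {
            struct kvm_mmu_page sp = {
                    .table = aligned_alloc(PAGE_SIZE, PAGE_SIZE),
            };
            sp.role.level = 1;      /* set once, when the table is created */

            uint64_t *sptep = &sp.table[137];   /* some spte in that table */
            struct kvm_mmu_page *found = page_header_sketch(&sp, sptep);

            /* The level read back reflects the table's true position in
             * the hierarchy, independent of what a caller passes around. */
            printf("level from page header: %d\n", found->role.level);
            free(sp.table);
            return 0;
    }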
There is no issue in the current code, since fast page fault currently only
fixes faults caused by dirty logging, which always occur on the last level
(level = 1).
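
As a worked illustration (a hedged paraphrase of is_last_spte() with
simplified constants; the harness is not KVM code): the sptes
write-protected for dirty logging are 4K entries, so the faulting spte
really is at level 1, and only a level taken from the hierarchy classifies
it correctly:

    #include <stdint.h>
    #include <stdio.h>

    #define PT_PAGE_TABLE_LEVEL 1
    #define PT_PAGE_SIZE_MASK   (1ULL << 7)  /* large-page bit in an spte */

    /* Paraphrase of is_last_spte(): an spte is a leaf if it sits in a
     * level-1 table, or if it maps a large page at a higher level. */
    static int is_last_spte(uint64_t spte, int level)
    {
            return level == PT_PAGE_TABLE_LEVEL || (spte & PT_PAGE_SIZE_MASK);
    }

    int main(void)
    {
            uint64_t spte = 0x3;    /* small-page spte: size bit clear */

            /* Dirty logging write-protects 4K sptes, so the true level is
             * 1 and the fast path correctly sees a last-level entry ...  */
            printf("level 1: last=%d\n", is_last_spte(spte, 1));

            /* ... whereas a level that did not come from the walk would
             * misclassify the same spte as an intermediate entry.        */
            printf("level 2: last=%d\n", is_last_spte(spte, 2));
            return 0;
    }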
This patch makes the code more readable and avoids a potential issue in
future development.
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm')

 arch/x86/kvm/mmu.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 668ae5916de9..63107049249d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2802,9 +2802,9 @@ static bool page_fault_can_be_fast(u32 error_code)
 }
 
 static bool
-fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 spte)
+fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			u64 *sptep, u64 spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 	gfn_t gfn;
 
 	WARN_ON(!sp->role.direct);
@@ -2830,6 +2830,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 			    u32 error_code)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
 	bool ret = false;
 	u64 spte = 0ull;
 
@@ -2853,7 +2854,8 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 		goto exit;
 	}
 
-	if (!is_last_spte(spte, level))
+	sp = page_header(__pa(iterator.sptep));
+	if (!is_last_spte(spte, sp->role.level))
 		goto exit;
 
 	/*
@@ -2879,7 +2881,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * the gfn is not stable for indirect shadow page.
 	 * See Documentation/virtual/kvm/locking.txt to get more detail.
 	 */
-	ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte);
+	ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
 exit:
 	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
 			      spte, ret);