diff options
| -rw-r--r-- | arch/x86/kvm/mmu.c | 15 |
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 03323dc705c2..02c839f40e29 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -743,9 +743,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) | |||
| 743 | struct kvm_mmu_page *sp; | 743 | struct kvm_mmu_page *sp; |
| 744 | unsigned long *rmapp; | 744 | unsigned long *rmapp; |
| 745 | 745 | ||
| 746 | if (!is_rmap_spte(*spte)) | ||
| 747 | return 0; | ||
| 748 | |||
| 749 | sp = page_header(__pa(spte)); | 746 | sp = page_header(__pa(spte)); |
| 750 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); | 747 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
| 751 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); | 748 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
| @@ -2087,11 +2084,13 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 2087 | if (!was_rmapped && is_large_pte(*sptep)) | 2084 | if (!was_rmapped && is_large_pte(*sptep)) |
| 2088 | ++vcpu->kvm->stat.lpages; | 2085 | ++vcpu->kvm->stat.lpages; |
| 2089 | 2086 | ||
| 2090 | page_header_update_slot(vcpu->kvm, sptep, gfn); | 2087 | if (is_shadow_present_pte(*sptep)) { |
| 2091 | if (!was_rmapped) { | 2088 | page_header_update_slot(vcpu->kvm, sptep, gfn); |
| 2092 | rmap_count = rmap_add(vcpu, sptep, gfn); | 2089 | if (!was_rmapped) { |
| 2093 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) | 2090 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 2094 | rmap_recycle(vcpu, sptep, gfn); | 2091 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 2092 | rmap_recycle(vcpu, sptep, gfn); | ||
| 2093 | } | ||
| 2095 | } | 2094 | } |
| 2096 | kvm_release_pfn_clean(pfn); | 2095 | kvm_release_pfn_clean(pfn); |
| 2097 | if (speculative) { | 2096 | if (speculative) { |
