aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/kvm/mmu.c10
 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b3b916ebeae..a04756a26fe2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1985,6 +1985,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
+	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
+		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
@@ -1998,7 +2000,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    bool reset_host_protection)
 {
 	int was_rmapped = 0;
-	int was_writable = is_writable_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
@@ -2048,15 +2049,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, sptep, gfn);
-		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
-	} else {
-		if (was_writable)
-			kvm_release_pfn_dirty(pfn);
-		else
-			kvm_release_pfn_clean(pfn);
 	}
+	kvm_release_pfn_clean(pfn);
 	if (speculative) {
 		vcpu->arch.last_pte_updated = sptep;
 		vcpu->arch.last_pte_gfn = gfn;