Diffstat (limited to 'arch/x86/kvm/mmu.c')

 arch/x86/kvm/mmu.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d097ef1..1ff4dbb73fb7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	    !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+			PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
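
For readers without the surrounding source: the rule both hunks enforce is that when the hardware provides no accessed/dirty bits in sptes (shadow_accessed_mask and shadow_dirty_mask are zero, as on EPT without A/D-bit support), writability stands in for dirtiness, so the pfn must be reported dirty at the last point write access is known to exist. Below is a minimal standalone sketch of the test the second hunk introduces; the mask values and the set_pfn_dirty() helper are simplified stand-ins chosen for illustration, not the kernel's definitions.

	/*
	 * Standalone sketch (not kernel code) of the patched test in
	 * mmu_spte_clear_track_bits(): use the hardware dirty bit when
	 * one exists, otherwise fall back to the writable bit.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PT_WRITABLE_MASK  (1ULL << 1)   /* stand-in for the x86 W bit  */
	#define DIRTY_MASK_HW     (1ULL << 6)   /* stand-in hardware dirty bit */

	static uint64_t shadow_dirty_mask;      /* 0 when hw lacks A/D bits    */

	static void set_pfn_dirty(uint64_t spte)
	{
		printf("pfn for spte %#llx marked dirty\n",
		       (unsigned long long)spte);
	}

	/* Mirrors the new condition: hardware dirty bit if available,
	 * else writability is treated as dirtiness. */
	static bool spte_was_dirty(uint64_t old_spte)
	{
		return old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
				   PT_WRITABLE_MASK);
	}

	int main(void)
	{
		uint64_t spte = PT_WRITABLE_MASK;  /* writable, hw bit clear */

		shadow_dirty_mask = 0;             /* no hw dirty tracking   */
		if (spte_was_dirty(spte))          /* writable => dirty      */
			set_pfn_dirty(spte);

		shadow_dirty_mask = DIRTY_MASK_HW; /* hw dirty bit available */
		if (spte_was_dirty(spte))          /* W bit alone no longer  */
			set_pfn_dirty(spte);       /* implies dirty: skipped */
		return 0;
	}

The first hunk applies the same fallback in mmu_spte_update(): as its new comment notes, no page is marked dirty when a non-writable spte is dropped, so when write access is being removed and no hardware accessed/dirty mask exists, that is the last opportunity to call kvm_set_pfn_dirty() for the old spte.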