author    Avi Kivity <avi@redhat.com>  2010-06-06 08:46:44 -0400
committer Avi Kivity <avi@redhat.com>  2010-08-01 23:40:21 -0400
commit    b79b93f92cb3b66b89d75525fdfd2454b1e1f446
tree      0ca735431e67c58ff4b69578dc56bdd6e875eebb
parent    a9221dd5ec125fbec1702fae016c6d2ea1a9a3da
KVM: MMU: Don't drop accessed bit while updating an spte
__set_spte() will happily replace an spte with the accessed bit set with
one that has the accessed bit clear.  Add a helper update_spte() which
checks for this condition and updates the page flag if needed.

Signed-off-by: Avi Kivity <avi@redhat.com>
 arch/x86/kvm/mmu.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
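The pattern the patch introduces can be sketched outside the kernel. What
follows is a minimal C11 userspace model, not the kernel code: spte_t,
ACCESSED_BIT, record_accessed(), set_spte_lossy() and update_spte_model()
are hypothetical stand-ins (for the shadow PTE word, shadow_accessed_mask,
mark_page_accessed(), __set_spte() and update_spte() respectively), and the
!shadow_accessed_mask escape hatch for hardware that lacks an accessed bit
in shadow PTEs is omitted for brevity.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic uint64_t spte_t;

#define ACCESSED_BIT (UINT64_C(1) << 5)  /* stand-in for shadow_accessed_mask */

/* Hypothetical stand-in for mark_page_accessed(): preserve the fact
 * that the guest touched the page before the bit is overwritten. */
static void record_accessed(uint64_t old_spte)
{
	printf("page behind spte %#llx was accessed\n",
	       (unsigned long long)old_spte);
}

/* Buggy pattern (what a plain __set_spte() amounts to here): if hardware
 * set ACCESSED_BIT after the caller last read the spte, the new value
 * silently discards that information. */
static void set_spte_lossy(spte_t *sptep, uint64_t new_spte)
{
	atomic_store(sptep, new_spte);
}

/* Fixed pattern, mirroring update_spte(): when the new value clears the
 * accessed bit, swap atomically and fold the old bit back in. */
static void update_spte_model(spte_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte;

	if (new_spte & ACCESSED_BIT) {
		atomic_store(sptep, new_spte);  /* bit kept; plain store is fine */
	} else {
		old_spte = atomic_exchange(sptep, new_spte);
		if (old_spte & ACCESSED_BIT)
			record_accessed(old_spte);  /* nothing is lost */
	}
}

int main(void)
{
	spte_t spte = 0x1000 | ACCESSED_BIT;         /* hardware marked it accessed */

	set_spte_lossy(&spte, 0x1000);               /* accessed bit silently dropped */
	atomic_store(&spte, 0x1000 | ACCESSED_BIT);  /* hardware sets it again */
	update_spte_model(&spte, 0x1000);            /* bit reported, then cleared */
	return 0;
}

Built with any C11 compiler (cc -std=c11 sketch.c), the lossy store discards
the access silently, while the exchange-based update reports it before the
bit disappears.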
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ba2efcf2b86e..d8d48329cb82 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -303,6 +303,19 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static void update_spte(u64 *sptep, u64 new_spte)
+{
+	u64 old_spte;
+
+	if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) {
+		__set_spte(sptep, new_spte);
+	} else {
+		old_spte = __xchg_spte(sptep, new_spte);
+		if (old_spte & shadow_accessed_mask)
+			mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
+	}
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  struct kmem_cache *base_cache, int min)
 {
@@ -721,7 +734,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 		if (is_writable_pte(*spte)) {
-			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
+			update_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
@@ -777,7 +790,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			   unsigned long data)
 {
 	int need_flush = 0;
-	u64 *spte, new_spte;
+	u64 *spte, new_spte, old_spte;
 	pte_t *ptep = (pte_t *)data;
 	pfn_t new_pfn;
 
@@ -797,9 +810,13 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 			new_spte &= ~PT_WRITABLE_MASK;
 			new_spte &= ~SPTE_HOST_WRITEABLE;
+			new_spte &= ~shadow_accessed_mask;
 			if (is_writable_pte(*spte))
 				kvm_set_pfn_dirty(spte_to_pfn(*spte));
-			__set_spte(spte, new_spte);
+			old_spte = __xchg_spte(spte, new_spte);
+			if (is_shadow_present_pte(old_spte)
+			    && (old_spte & shadow_accessed_mask))
+				mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
 			spte = rmap_next(kvm, rmapp, spte);
 		}
 	}
@@ -1922,7 +1939,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	__set_spte(sptep, spte);
+	update_spte(sptep, spte);
 done:
 	return ret;
 }
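A note on the kvm_set_pte_rmapp() hunk above: that path does not use the new
helper. Presumably since the spte is being repointed at a different pfn, the
old entry's accessed bit always belongs to the old page, so the code clears
shadow_accessed_mask from new_spte up front, swaps unconditionally with
__xchg_spte(), and folds the bit back with mark_page_accessed() whenever the
old spte was present, rather than only when update_spte()'s condition would
trigger.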