path: root/arch/x86/kvm
author	Avi Kivity <avi@redhat.com>	2010-06-06 07:48:06 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 23:40:20 -0400
commit	a9221dd5ec125fbec1702fae016c6d2ea1a9a3da (patch)
tree	148607c0424c4ec993b55dd85eec69332a775b22 /arch/x86/kvm
parent	ce061867aa2877605cda96fa8ec7dff15f70a983 (diff)
KVM: MMU: Atomically check for accessed bit when dropping an spte
Currently, in the window between the check for the accessed bit and actually dropping the spte, a vcpu can access the page through the spte and set the bit, which will then be ignored by the mmu. Fix by using an exchange operation to atomically fetch the spte and drop it.

Signed-off-by: Avi Kivity <avi@redhat.com>
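As context for the change (not part of the commit): below is a minimal userspace sketch of the difference between the old read-then-overwrite sequence and the new atomic fetch-and-drop. The bit positions, helper names, and the use of the __atomic_exchange_n builtin are assumptions for illustration only; the kernel itself uses xchg() on 64-bit and a cmpxchg64() loop on 32-bit, as shown in the diff below.

/*
 * Sketch of the pattern this patch introduces: fetch and clear an spte with
 * a single atomic exchange so that an accessed bit set by another vcpu
 * between the read and the write cannot be lost.  Bit layout and names are
 * illustrative assumptions, not the real KVM spte format.
 * Build with: cc -O2 sketch.c
 */
#include <stdint.h>
#include <stdio.h>

#define SPTE_ACCESSED (1ULL << 5)	/* assumed accessed-bit position */
#define SPTE_WRITABLE (1ULL << 1)	/* assumed writable-bit position */

/* Old, racy shape: another CPU may set bits between the read and the store. */
static uint64_t drop_spte_racy(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old = *sptep;		/* window where a freshly set bit is missed */

	*sptep = new_spte;
	return old;
}

/* New shape: the exchange returns exactly the value that was dropped. */
static uint64_t drop_spte_atomic(uint64_t *sptep, uint64_t new_spte)
{
	return __atomic_exchange_n(sptep, new_spte, __ATOMIC_SEQ_CST);
}

int main(void)
{
	uint64_t spte1 = SPTE_ACCESSED | SPTE_WRITABLE;
	uint64_t spte2 = SPTE_ACCESSED | SPTE_WRITABLE;
	uint64_t old;

	old = drop_spte_racy(&spte1, 0);	/* correct only without concurrency */
	printf("racy drop saw accessed=%d\n", !!(old & SPTE_ACCESSED));

	old = drop_spte_atomic(&spte2, 0);	/* safe against concurrent setters */
	printf("atomic drop saw accessed=%d writable=%d\n",
	       !!(old & SPTE_ACCESSED), !!(old & SPTE_WRITABLE));
	return 0;
}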
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbdca08b8d8c..ba2efcf2b86e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -288,6 +288,21 @@ static void __set_spte(u64 *sptep, u64 spte)
 #endif
 }
 
+static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+{
+#ifdef CONFIG_X86_64
+	return xchg(sptep, new_spte);
+#else
+	u64 old_spte;
+
+	do {
+		old_spte = *sptep;
+	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+
+	return old_spte;
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  struct kmem_cache *base_cache, int min)
 {
@@ -653,18 +668,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
+	u64 old_spte;
 
-	if (!is_rmap_spte(*sptep)) {
-		__set_spte(sptep, new_spte);
+	old_spte = __xchg_spte(sptep, new_spte);
+	if (!is_rmap_spte(old_spte))
 		return;
-	}
-	pfn = spte_to_pfn(*sptep);
-	if (*sptep & shadow_accessed_mask)
+	pfn = spte_to_pfn(old_spte);
+	if (old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(*sptep))
+	if (is_writable_pte(old_spte))
 		kvm_set_pfn_dirty(pfn);
 	rmap_remove(kvm, sptep);
-	__set_spte(sptep, new_spte);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)