author		Avi Kivity <avi@redhat.com>	2010-06-06 07:38:12 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 23:40:18 -0400
commit		ce061867aa2877605cda96fa8ec7dff15f70a983 (patch)
tree		690b9723b06c2f62586f5a74121fe16f3e2e1caa /arch/x86/kvm/mmu.c
parent		be38d276b0189fa86231fc311428622a1981ad62 (diff)
KVM: MMU: Move accessed/dirty bit checks from rmap_remove() to drop_spte()
Since we need to make the accessed/dirty bit check atomic, move it to the
place that will set the new spte.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1ad39cf70e1..fbdca08b8d8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -612,19 +612,11 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *sp;
-	pfn_t pfn;
 	gfn_t gfn;
 	unsigned long *rmapp;
 	int i;
 
-	if (!is_rmap_spte(*spte))
-		return;
 	sp = page_header(__pa(spte));
-	pfn = spte_to_pfn(*spte);
-	if (*spte & shadow_accessed_mask)
-		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(*spte))
-		kvm_set_pfn_dirty(pfn);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
 	if (!*rmapp) {
@@ -660,6 +652,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
+	pfn_t pfn;
+
+	if (!is_rmap_spte(*sptep)) {
+		__set_spte(sptep, new_spte);
+		return;
+	}
+	pfn = spte_to_pfn(*sptep);
+	if (*sptep & shadow_accessed_mask)
+		kvm_set_pfn_accessed(pfn);
+	if (is_writable_pte(*sptep))
+		kvm_set_pfn_dirty(pfn);
 	rmap_remove(kvm, sptep);
 	__set_spte(sptep, new_spte);
 }
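
The point of the move is that hardware can set the accessed/dirty bits at any
time while the spte is present, so they have to be sampled at the spot where
the spte is replaced, allowing the read-and-replace to later be made atomic.
Below is a minimal, illustrative sketch of how a call site is expected to use
drop_spte() after this patch. The helper name example_zap_spte() is
hypothetical, and shadow_trap_nonpresent_pte is assumed here to be the
non-present spte marker used by the MMU of this era; neither is part of the
patch above.

/*
 * Illustrative sketch, not part of the patch: a caller that used to pair
 * rmap_remove() with __set_spte() now calls drop_spte(), which reads
 * *sptep, propagates the accessed and dirty bits to the backing page,
 * and then installs the new spte value in one place.
 * shadow_trap_nonpresent_pte is assumed as the non-present marker.
 */
static void example_zap_spte(struct kvm *kvm, u64 *sptep)
{
	/* Before this series: A/D handling was buried in rmap_remove(). */
	/*   rmap_remove(kvm, sptep);                                    */
	/*   __set_spte(sptep, shadow_trap_nonpresent_pte);              */

	/* After: one call, with A/D checks done where the spte is set. */
	drop_spte(kvm, sptep, shadow_trap_nonpresent_pte);
}

Keeping the read of the old spte and the write of its replacement inside one
helper is what lets a later change turn the pair into a single atomic exchange
without touching every caller.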