author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2010-07-15 23:28:09 -0400
committer  Avi Kivity <avi@redhat.com>                     2010-08-01 23:41:00 -0400
commit     e4b502ead259fcf70839414abb7c8cdc3b523f01 (patch)
tree       8ff58cadc3e2e95952af3ff5f08320dcaf706831 /arch/x86/kvm
parent     be233d49ea8c1fde9f4afec378dc2c2f16ab0263 (diff)
KVM: MMU: cleanup spte set and accessed/dirty tracking
Introduce set_spte_track_bits() to clean up the current code.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c  17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9c7fae08291d..e4b862eb8885 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -679,7 +679,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         }
 }
 
-static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
         pfn_t pfn;
         u64 old_spte;
@@ -692,6 +692,11 @@ static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
                 kvm_set_pfn_accessed(pfn);
         if (is_writable_pte(old_spte))
                 kvm_set_pfn_dirty(pfn);
+}
+
+static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+{
+        set_spte_track_bits(sptep, new_spte);
         rmap_remove(kvm, sptep);
 }
 
@@ -791,7 +796,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              unsigned long data)
 {
         int need_flush = 0;
-        u64 *spte, new_spte, old_spte;
+        u64 *spte, new_spte;
         pte_t *ptep = (pte_t *)data;
         pfn_t new_pfn;
 
@@ -812,13 +817,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         new_spte &= ~PT_WRITABLE_MASK;
                         new_spte &= ~SPTE_HOST_WRITEABLE;
                         new_spte &= ~shadow_accessed_mask;
-                        if (is_writable_pte(*spte))
-                                kvm_set_pfn_dirty(spte_to_pfn(*spte));
-                        old_spte = __xchg_spte(spte, new_spte);
-                        if (is_shadow_present_pte(old_spte)
-                              && (!shadow_accessed_mask ||
-                                  old_spte & shadow_accessed_mask))
-                                mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
+                        set_spte_track_bits(spte, new_spte);
                         spte = rmap_next(kvm, rmapp, spte);
                 }
         }
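
For reference, a minimal sketch of how the two touched functions read once this patch is applied, pieced together from the hunks above. The middle of set_spte_track_bits() is pre-existing drop_spte() code that the diff only carries as unchanged context between hunks, so those lines are an assumption here rather than part of the patch itself.

static void set_spte_track_bits(u64 *sptep, u64 new_spte)
{
        pfn_t pfn;
        u64 old_spte;

        /* Exchange the spte and inspect the old value; this part is
         * unchanged context assumed from the pre-patch drop_spte(). */
        old_spte = __xchg_spte(sptep, new_spte);
        if (!is_rmap_spte(old_spte))
                return;
        pfn = spte_to_pfn(old_spte);

        /* Propagate accessed/dirty information to the backing page. */
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writable_pte(old_spte))
                kvm_set_pfn_dirty(pfn);
}

/* drop_spte() now delegates the accessed/dirty tracking and only
 * removes the rmap entry itself. */
static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
        set_spte_track_bits(sptep, new_spte);
        rmap_remove(kvm, sptep);
}

With the helper in place, kvm_set_pte_rmapp() can drop its open-coded dirty/accessed bookkeeping and call set_spte_track_bits(spte, new_spte) instead, as the last hunk shows.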