author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-08-02 04:15:08 -0400
committer	Avi Kivity <avi@redhat.com>			2010-10-24 04:50:32 -0400
commit		4132779b1718f066ec2d06a71c8958039865cd49
tree		4be0a4d2cf26277a75db5a7aa74c7983a99fc344 /arch/x86/kvm/mmu.c
parent		8672b7217a234c41d425a63b171af809e1169842
KVM: MMU: mark page dirty only when page is really written
Mark the page dirty only when the page is really written; this is more exact, and it also fixes dirty-page marking in the speculative path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
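The core of the change: kvm_set_pfn_dirty() is keyed off a falling edge of the hardware dirty bit rather than off mere writability. A minimal standalone sketch of that transition test (the mask value is an illustrative placeholder, not the kernel's shadow_dirty_mask; the helper mirrors the spte_is_bit_cleared() introduced below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_DIRTY_MASK (1ULL << 6)	/* illustrative placeholder value */

/* True when the update clears a bit that the old spte had set. */
static bool spte_is_bit_cleared(uint64_t old_spte, uint64_t new_spte,
				uint64_t bit_mask)
{
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

int main(void)
{
	uint64_t written = SHADOW_DIRTY_MASK;	/* CPU set the dirty bit */
	uint64_t untouched = 0;			/* page was never written */

	/* Only the really-written page triggers a dirty notification. */
	printf("%d %d\n",
	       spte_is_bit_cleared(written, 0, SHADOW_DIRTY_MASK),	/* 1 */
	       spte_is_bit_cleared(untouched, 0, SHADOW_DIRTY_MASK));	/* 0 */
	return 0;
}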
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c | 47
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c07b9a200bc8..ff95d418750d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -307,24 +307,42 @@ static bool spte_has_volatile_bits(u64 spte)
 	if (!is_shadow_present_pte(spte))
 		return false;
 
-	if (spte & shadow_accessed_mask)
+	if ((spte & shadow_accessed_mask) &&
+	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
 		return false;
 
 	return true;
 }
 
+static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return (old_spte & bit_mask) && !(new_spte & bit_mask);
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
-	u64 old_spte;
+	u64 mask, old_spte = *sptep;
+
+	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) ||
-	      !is_rmap_spte(*sptep))
+	new_spte |= old_spte & shadow_dirty_mask;
+
+	mask = shadow_accessed_mask;
+	if (is_writable_pte(old_spte))
+		mask |= shadow_dirty_mask;
+
+	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
 		__set_spte(sptep, new_spte);
-	else {
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
-		if (old_spte & shadow_accessed_mask)
-			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-	}
+
+	if (!shadow_accessed_mask)
+		return;
+
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
+		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
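To unpack the fast-path test the reworked update_spte() uses: the accessed bit is always volatile, the dirty bit only while the spte is writable, and a plain store is safe only when the new value already has every such bit set, so nothing the hardware flips concurrently can be lost. A standalone sketch under those assumptions (the placeholder mask values and the helper names volatile_bits()/plain_store_ok() are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define ACCESSED (1ULL << 5)	/* placeholder for shadow_accessed_mask */
#define DIRTY    (1ULL << 6)	/* placeholder for shadow_dirty_mask */
#define WRITABLE (1ULL << 1)	/* placeholder for the writable bit */

/* Bits the CPU may still set in old_spte behind our back. */
static uint64_t volatile_bits(uint64_t old_spte)
{
	uint64_t mask = ACCESSED;

	if (old_spte & WRITABLE)
		mask |= DIRTY;	/* dirty can only appear on a writable spte */
	return mask;
}

/*
 * A plain (non-atomic) store is safe when the new value already carries
 * every volatile bit; otherwise an atomic xchg must read the old bits out
 * so a concurrent accessed/dirty update is never dropped.
 */
static bool plain_store_ok(uint64_t old_spte, uint64_t new_spte)
{
	uint64_t mask = volatile_bits(old_spte);

	return (new_spte & mask) == mask;
}

int main(void)
{
	/* Read-only old spte: only the accessed bit is volatile, so a new
	 * value that keeps it set can be written with a plain store. */
	return !plain_store_ok(ACCESSED, ACCESSED);
}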
@@ -704,7 +722,7 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(old_spte))
+	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
 }
 
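Note the !shadow_dirty_mask fallback in the hunk above: when the masks are zero because the hardware exposes no dirty bit (as with EPT on processors without accessed/dirty support), there is no way to tell whether the page was written, so set_spte_track_bits() conservatively reports it dirty. A one-function sketch of that rule (should_mark_dirty() is an illustrative name, not a kernel helper):

#include <stdbool.h>
#include <stdint.h>

/* With no hardware dirty bit available (dirty_mask == 0) we cannot know
 * whether the page was written, so err on the side of marking it dirty. */
static bool should_mark_dirty(uint64_t old_spte, uint64_t dirty_mask)
{
	return !dirty_mask || (old_spte & dirty_mask);
}

int main(void)
{
	return should_mark_dirty(0, 0) ? 0 : 1;	/* no mask: always dirty */
}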
@@ -759,13 +777,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
-	if (write_protected) {
-		pfn_t pfn;
-
-		spte = rmap_next(kvm, rmapp, NULL);
-		pfn = spte_to_pfn(*spte);
-		kvm_set_pfn_dirty(pfn);
-	}
 
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
@@ -1938,7 +1949,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte | shadow_dirty_mask;
+	spte = shadow_base_present_pte;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
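The hunk above stops pre-setting the dirty bit on a freshly built spte. A short sketch of the new seeding (seed_spte() and the constants are placeholders, not kernel definitions): since the bit now starts clear, only a real guest write, observed through the hardware dirty bit, can make the spte look dirty later.

#include <stdbool.h>
#include <stdint.h>

#define BASE_PRESENT (1ULL << 11)	/* placeholder shadow_base_present_pte */
#define ACCESSED     (1ULL << 5)	/* placeholder shadow_accessed_mask */

static uint64_t seed_spte(bool speculative)
{
	uint64_t spte = BASE_PRESENT;	/* previously: BASE_PRESENT | DIRTY */

	if (!speculative)
		spte |= ACCESSED;	/* accessed only for real faults */
	return spte;
}

int main(void)
{
	/* Speculative mapping: neither accessed nor dirty is pre-set. */
	return (int)(seed_spte(true) & ACCESSED);
}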
@@ -1999,8 +2010,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
-		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;